395 files changed, 82451 insertions, 0 deletions
diff --git a/ABI/pytalloc-util-2.0.6.sigs b/ABI/pytalloc-util-2.0.6.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.0.6.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.0.7.sigs b/ABI/pytalloc-util-2.0.7.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.0.7.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.0.8.sigs b/ABI/pytalloc-util-2.0.8.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.0.8.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.0.sigs b/ABI/pytalloc-util-2.1.0.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.0.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.1.sigs b/ABI/pytalloc-util-2.1.1.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.1.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.10.sigs b/ABI/pytalloc-util-2.1.10.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.10.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.11.sigs b/ABI/pytalloc-util-2.1.11.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.11.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.12.sigs b/ABI/pytalloc-util-2.1.12.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.12.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.13.sigs b/ABI/pytalloc-util-2.1.13.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.13.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.14.sigs b/ABI/pytalloc-util-2.1.14.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.14.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.15.sigs b/ABI/pytalloc-util-2.1.15.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.15.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.16.sigs b/ABI/pytalloc-util-2.1.16.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.16.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.2.sigs b/ABI/pytalloc-util-2.1.2.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.2.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.3.sigs b/ABI/pytalloc-util-2.1.3.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.3.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.4.sigs b/ABI/pytalloc-util-2.1.4.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.4.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.5.sigs b/ABI/pytalloc-util-2.1.5.sigs
new file mode 100644
index 0000000..961c1a8
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.5.sigs
@@ -0,0 +1,6 @@
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.6.sigs b/ABI/pytalloc-util-2.1.6.sigs
new file mode 100644
index 0000000..666fec0
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.6.sigs
@@ -0,0 +1,13 @@
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.7.sigs b/ABI/pytalloc-util-2.1.7.sigs
new file mode 100644
index 0000000..666fec0
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.7.sigs
@@ -0,0 +1,13 @@
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.8.sigs b/ABI/pytalloc-util-2.1.8.sigs
new file mode 100644
index 0000000..666fec0
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.8.sigs
@@ -0,0 +1,13 @@
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.1.9.sigs b/ABI/pytalloc-util-2.1.9.sigs
new file mode 100644
index 0000000..9d4d4d1
--- /dev/null
+++ b/ABI/pytalloc-util-2.1.9.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_CObject_FromTallocPtr: PyObject *(void *)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.2.0.sigs b/ABI/pytalloc-util-2.2.0.sigs
new file mode 100644
index 0000000..62f066f
--- /dev/null
+++ b/ABI/pytalloc-util-2.2.0.sigs
@@ -0,0 +1,15 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.3.0.sigs b/ABI/pytalloc-util-2.3.0.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.3.0.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.3.1.sigs b/ABI/pytalloc-util-2.3.1.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.3.1.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.3.2.sigs b/ABI/pytalloc-util-2.3.2.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.3.2.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.3.3.sigs b/ABI/pytalloc-util-2.3.3.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.3.3.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.3.4.sigs b/ABI/pytalloc-util-2.3.4.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.3.4.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.3.5.sigs b/ABI/pytalloc-util-2.3.5.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.3.5.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.4.0.sigs b/ABI/pytalloc-util-2.4.0.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.4.0.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.4.1.sigs b/ABI/pytalloc-util-2.4.1.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.4.1.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/pytalloc-util-2.4.2.sigs b/ABI/pytalloc-util-2.4.2.sigs
new file mode 100644
index 0000000..6056577
--- /dev/null
+++ b/ABI/pytalloc-util-2.4.2.sigs
@@ -0,0 +1,16 @@
+_pytalloc_check_type: int (PyObject *, const char *)
+_pytalloc_get_mem_ctx: TALLOC_CTX *(PyObject *)
+_pytalloc_get_name: const char *(PyObject *)
+_pytalloc_get_ptr: void *(PyObject *)
+_pytalloc_get_type: void *(PyObject *, const char *)
+pytalloc_BaseObject_PyType_Ready: int (PyTypeObject *)
+pytalloc_BaseObject_check: int (PyObject *)
+pytalloc_BaseObject_size: size_t (void)
+pytalloc_Check: int (PyObject *)
+pytalloc_GenericObject_reference_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GenericObject_steal_ex: PyObject *(TALLOC_CTX *, void *)
+pytalloc_GetBaseObjectType: PyTypeObject *(void)
+pytalloc_GetObjectType: PyTypeObject *(void)
+pytalloc_reference_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
+pytalloc_steal: PyObject *(PyTypeObject *, void *)
+pytalloc_steal_ex: PyObject *(PyTypeObject *, TALLOC_CTX *, void *)
diff --git a/ABI/talloc-2.0.2.sigs b/ABI/talloc-2.0.2.sigs
new file mode 100644
index 0000000..6e236d5
--- /dev/null
+++ b/ABI/talloc-2.0.2.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.0.3.sigs b/ABI/talloc-2.0.3.sigs
new file mode 100644
index 0000000..6e236d5
--- /dev/null
+++ b/ABI/talloc-2.0.3.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.0.4.sigs b/ABI/talloc-2.0.4.sigs
new file mode 100644
index 0000000..6e236d5
--- /dev/null
+++ b/ABI/talloc-2.0.4.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.0.5.sigs b/ABI/talloc-2.0.5.sigs
new file mode 100644
index 0000000..6e236d5
--- /dev/null
+++ b/ABI/talloc-2.0.5.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.0.6.sigs b/ABI/talloc-2.0.6.sigs
new file mode 100644
index 0000000..6e236d5
--- /dev/null
+++ b/ABI/talloc-2.0.6.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.0.7.sigs b/ABI/talloc-2.0.7.sigs
new file mode 100644
index 0000000..6e236d5
--- /dev/null
+++ b/ABI/talloc-2.0.7.sigs
@@ -0,0 +1,62 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.0.8.sigs b/ABI/talloc-2.0.8.sigs
new file mode 100644
index 0000000..15a9e95
--- /dev/null
+++ b/ABI/talloc-2.0.8.sigs
@@ -0,0 +1,63 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_memlimit: int (const void *, size_t)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.1.0.sigs b/ABI/talloc-2.1.0.sigs
new file mode 100644
index 0000000..eae12cc
--- /dev/null
+++ b/ABI/talloc-2.1.0.sigs
@@ -0,0 +1,64 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_memlimit: int (const void *, size_t)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.1.1.sigs b/ABI/talloc-2.1.1.sigs
new file mode 100644
index 0000000..eae12cc
--- /dev/null
+++ b/ABI/talloc-2.1.1.sigs
@@ -0,0 +1,64 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_memlimit: int (const void *, size_t)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.1.10.sigs b/ABI/talloc-2.1.10.sigs
new file mode 100644
index 0000000..9969ce3
--- /dev/null
+++ b/ABI/talloc-2.1.10.sigs
@@ -0,0 +1,65 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_memlimit: int (const void *, size_t)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *)
+talloc_show_parents: void (const void *, FILE *)
+talloc_strdup: char *(const void *, const char *)
+talloc_strdup_append: char *(char *, const char *)
+talloc_strdup_append_buffer: char *(char *, const char *)
+talloc_strndup: char *(const void *, const char *, size_t)
+talloc_strndup_append: char *(char *, const char *, size_t)
+talloc_strndup_append_buffer: char *(char *, const char *, size_t)
+talloc_test_get_magic: int (void)
+talloc_total_blocks: size_t (const void *)
+talloc_total_size: size_t (const void *)
+talloc_unlink: int (const void *, void *)
+talloc_vasprintf: char *(const void *, const char *, va_list)
+talloc_vasprintf_append: char *(char *, const char *, va_list)
+talloc_vasprintf_append_buffer: char *(char *, const char *, va_list)
+talloc_version_major: int (void)
+talloc_version_minor: int (void)
diff --git a/ABI/talloc-2.1.11.sigs b/ABI/talloc-2.1.11.sigs
new file mode 100644
index 0000000..9969ce3
--- /dev/null
+++ b/ABI/talloc-2.1.11.sigs
@@ -0,0 +1,65 @@
+_talloc: void *(const void *, size_t)
+_talloc_array: void *(const void *, size_t, unsigned int, const char *)
+_talloc_free: int (void *, const char *)
+_talloc_get_type_abort: void *(const void *, const char *, const char *)
+_talloc_memdup: void *(const void *, const void *, size_t, const char *)
+_talloc_move: void *(const void *, const void *)
+_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t)
+_talloc_realloc: void *(const void *, void *, size_t, const char *)
+_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *)
+_talloc_reference_loc: void *(const void *, const void *, const char *)
+_talloc_set_destructor: void (const void *, int (*)(void *))
+_talloc_steal_loc: void *(const void *, const void *, const char *)
+_talloc_zero: void *(const void *, size_t, const char *)
+_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *)
+talloc_asprintf: char *(const void *, const char *, ...)
+talloc_asprintf_append: char *(char *, const char *, ...)
+talloc_asprintf_append_buffer: char *(char *, const char *, ...)
+talloc_autofree_context: void *(void)
+talloc_check_name: void *(const void *, const char *)
+talloc_disable_null_tracking: void (void)
+talloc_enable_leak_report: void (void)
+talloc_enable_leak_report_full: void (void)
+talloc_enable_null_tracking: void (void)
+talloc_enable_null_tracking_no_autofree: void (void)
+talloc_find_parent_byname: void *(const void *, const char *)
+talloc_free_children: void (void *)
+talloc_get_name: const char *(const void *)
+talloc_get_size: size_t (const void *)
+talloc_increase_ref_count: int (const void *)
+talloc_init: void *(const char *, ...)
+talloc_is_parent: int (const void *, const void *)
+talloc_named: void *(const void *, size_t, const char *, ...)
+talloc_named_const: void *(const void *, size_t, const char *)
+talloc_parent: void *(const void *)
+talloc_parent_name: const char *(const void *)
+talloc_pool: void *(const void *, size_t)
+talloc_realloc_fn: void *(const void *, void *, size_t)
+talloc_reference_count: size_t (const void *)
+talloc_reparent: void *(const void *, const void *, const void *)
+talloc_report: void (const void *, FILE *)
+talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *)
+talloc_report_depth_file: void (const void *, int, int, FILE *)
+talloc_report_full: void (const void *, FILE *)
+talloc_set_abort_fn: void (void (*)(const char *))
+talloc_set_log_fn: void (void (*)(const char *))
+talloc_set_log_stderr: void (void)
+talloc_set_memlimit: int (const void *, size_t)
+talloc_set_name: const char *(const void *, const char *, ...)
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.12.sigs b/ABI/talloc-2.1.12.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.12.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.13.sigs b/ABI/talloc-2.1.13.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.13.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.14.sigs b/ABI/talloc-2.1.14.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.14.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, 
unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) 
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.15.sigs b/ABI/talloc-2.1.15.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.15.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.16.sigs b/ABI/talloc-2.1.16.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.16.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.2.sigs b/ABI/talloc-2.1.2.sigs new file mode 100644 index 0000000..eae12cc --- /dev/null +++ b/ABI/talloc-2.1.2.sigs @@ -0,0 +1,64 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, 
unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) 
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.3.sigs b/ABI/talloc-2.1.3.sigs new file mode 100644 index 0000000..eae12cc --- /dev/null +++ b/ABI/talloc-2.1.3.sigs @@ -0,0 +1,64 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.4.sigs b/ABI/talloc-2.1.4.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.4.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.5.sigs b/ABI/talloc-2.1.5.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.5.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, 
unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) 
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.6.sigs b/ABI/talloc-2.1.6.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.6.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.7.sigs b/ABI/talloc-2.1.7.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.7.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.8.sigs b/ABI/talloc-2.1.8.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.8.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, 
unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) 
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.1.9.sigs b/ABI/talloc-2.1.9.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.1.9.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.2.0.sigs b/ABI/talloc-2.2.0.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.2.0.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.3.0.sigs b/ABI/talloc-2.3.0.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.3.0.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, 
unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) 
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.3.1.sigs b/ABI/talloc-2.3.1.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.3.1.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
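Two entries in the list just started, _talloc_steal_loc and _talloc_move, back the talloc_steal() and talloc_move() macros that transfer a chunk to a new parent context. A common pattern is to build an object on a throwaway context and reparent it only on success; a hedged sketch (build_greeting is a made-up helper, not from this diff):

#include <talloc.h>

static char *build_greeting(TALLOC_CTX *mem_ctx, const char *who)
{
    TALLOC_CTX *tmp = talloc_new(NULL);
    char *s;

    if (tmp == NULL) {
        return NULL;
    }
    s = talloc_asprintf(tmp, "hello %s", who);
    if (s == NULL) {
        talloc_free(tmp);         /* discards all partial work at once */
        return NULL;
    }
    talloc_steal(mem_ctx, s);     /* reparent: _talloc_steal_loc() underneath */
    talloc_free(tmp);             /* s survives, now owned by mem_ctx */
    return s;
}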
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.3.2.sigs b/ABI/talloc-2.3.2.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.3.2.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
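The _talloc_set_destructor entry above is the hook behind the talloc_set_destructor() macro: a callback that runs when the chunk is freed, letting a talloc tree release non-memory resources on the way down. A minimal sketch, assuming an invented "struct conn" wrapping a file descriptor:

#include <unistd.h>
#include <talloc.h>

struct conn {
    int fd;
};

/* runs automatically during talloc_free(); returning -1 would veto the free */
static int conn_destructor(struct conn *c)
{
    close(c->fd);
    return 0;
}

int main(void)
{
    TALLOC_CTX *mem_ctx = talloc_new(NULL);
    struct conn *c = talloc(mem_ctx, struct conn);
    if (c == NULL) {
        talloc_free(mem_ctx);
        return 1;
    }
    c->fd = dup(1);
    talloc_set_destructor(c, conn_destructor);
    return talloc_free(mem_ctx);   /* destructor fires before the memory goes away */
}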
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.3.3.sigs b/ABI/talloc-2.3.3.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.3.3.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, 
unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) 
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.3.4.sigs b/ABI/talloc-2.3.4.sigs new file mode 100644 index 0000000..9969ce3 --- /dev/null +++ b/ABI/talloc-2.3.4.sigs @@ -0,0 +1,65 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.3.5.sigs b/ABI/talloc-2.3.5.sigs new file mode 100644 index 0000000..ec3cee4 --- /dev/null +++ b/ABI/talloc-2.3.5.sigs @@ -0,0 +1,66 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_addbuf: void (char **, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
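The 2.3.5 list is the first in this series to grow a new symbol, talloc_asprintf_addbuf: void (char **, const char *, ...). Note the void return and char ** argument: unlike talloc_asprintf_append_buffer, which returns the new pointer and must be checked after every call, addbuf reports failure by freeing the string and leaving the caller's pointer NULL, after which further calls are no-ops. A sketch of the intended usage, assuming those documented 2.3.5 semantics:

#include <stdio.h>
#include <talloc.h>

int main(void)
{
    char *s = talloc_strdup(NULL, "");        /* start with an empty buffer */
    talloc_asprintf_addbuf(&s, "host=%s ", "localhost");
    talloc_asprintf_addbuf(&s, "port=%d", 445);
    if (s == NULL) {
        return 1;                             /* one check covers every append */
    }
    puts(s);
    talloc_free(s);
    return 0;
}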
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.4.0.sigs b/ABI/talloc-2.4.0.sigs new file mode 100644 index 0000000..ec3cee4 --- /dev/null +++ b/ABI/talloc-2.4.0.sigs @@ -0,0 +1,66 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, 
unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_addbuf: void (char **, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) 
+talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.4.1.sigs b/ABI/talloc-2.4.1.sigs new file mode 100644 index 0000000..ec3cee4 --- /dev/null +++ b/ABI/talloc-2.4.1.sigs @@ -0,0 +1,66 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_addbuf: void (char **, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) +talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) 
+talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/ABI/talloc-2.4.2.sigs b/ABI/talloc-2.4.2.sigs new file mode 100644 index 0000000..ec3cee4 --- /dev/null +++ b/ABI/talloc-2.4.2.sigs @@ -0,0 +1,66 @@ +_talloc: void *(const void *, size_t) +_talloc_array: void *(const void *, size_t, unsigned int, const char *) +_talloc_free: int (void *, const char *) +_talloc_get_type_abort: void *(const void *, const char *, const char *) +_talloc_memdup: void *(const void *, const void *, size_t, const char *) +_talloc_move: void *(const void *, const void *) +_talloc_pooled_object: void *(const void *, size_t, const char *, unsigned int, size_t) +_talloc_realloc: void *(const void *, void *, size_t, const char *) +_talloc_realloc_array: void *(const void *, void *, size_t, unsigned int, const char *) +_talloc_reference_loc: void *(const void *, const void *, const char *) +_talloc_set_destructor: void (const void *, int (*)(void *)) +_talloc_steal_loc: void *(const void *, const void *, const char *) +_talloc_zero: void *(const void *, size_t, const char *) +_talloc_zero_array: void *(const void *, size_t, unsigned int, const char *) +talloc_asprintf: char *(const void *, const char *, ...) +talloc_asprintf_addbuf: void (char **, const char *, ...) +talloc_asprintf_append: char *(char *, const char *, ...) +talloc_asprintf_append_buffer: char *(char *, const char *, ...) 
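The report functions recurring in every list here (talloc_report, talloc_report_full, talloc_report_depth_file) plus the talloc_enable_* tracking calls are the library's leak-hunting tools. A minimal sketch; the leak below is deliberate, to show what the report covers:

#include <stdio.h>
#include <talloc.h>

int main(void)
{
    talloc_enable_null_tracking();       /* track blocks parented on NULL */

    char *leaked = talloc_strdup(NULL, "oops");
    (void)leaked;                        /* never freed, on purpose */

    talloc_report_full(NULL, stderr);    /* names, sizes, counts of live blocks */
    return 0;
}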
+talloc_autofree_context: void *(void) +talloc_check_name: void *(const void *, const char *) +talloc_disable_null_tracking: void (void) +talloc_enable_leak_report: void (void) +talloc_enable_leak_report_full: void (void) +talloc_enable_null_tracking: void (void) +talloc_enable_null_tracking_no_autofree: void (void) +talloc_find_parent_byname: void *(const void *, const char *) +talloc_free_children: void (void *) +talloc_get_name: const char *(const void *) +talloc_get_size: size_t (const void *) +talloc_increase_ref_count: int (const void *) +talloc_init: void *(const char *, ...) +talloc_is_parent: int (const void *, const void *) +talloc_named: void *(const void *, size_t, const char *, ...) +talloc_named_const: void *(const void *, size_t, const char *) +talloc_parent: void *(const void *) +talloc_parent_name: const char *(const void *) +talloc_pool: void *(const void *, size_t) +talloc_realloc_fn: void *(const void *, void *, size_t) +talloc_reference_count: size_t (const void *) +talloc_reparent: void *(const void *, const void *, const void *) +talloc_report: void (const void *, FILE *) +talloc_report_depth_cb: void (const void *, int, int, void (*)(const void *, int, int, int, void *), void *) +talloc_report_depth_file: void (const void *, int, int, FILE *) +talloc_report_full: void (const void *, FILE *) +talloc_set_abort_fn: void (void (*)(const char *)) +talloc_set_log_fn: void (void (*)(const char *)) +talloc_set_log_stderr: void (void) +talloc_set_memlimit: int (const void *, size_t) +talloc_set_name: const char *(const void *, const char *, ...) +talloc_set_name_const: void (const void *, const char *) +talloc_show_parents: void (const void *, FILE *) +talloc_strdup: char *(const void *, const char *) +talloc_strdup_append: char *(char *, const char *) +talloc_strdup_append_buffer: char *(char *, const char *) +talloc_strndup: char *(const void *, const char *, size_t) +talloc_strndup_append: char *(char *, const char *, size_t) +talloc_strndup_append_buffer: char *(char *, const char *, size_t) +talloc_test_get_magic: int (void) +talloc_total_blocks: size_t (const void *) +talloc_total_size: size_t (const void *) +talloc_unlink: int (const void *, void *) +talloc_vasprintf: char *(const void *, const char *, va_list) +talloc_vasprintf_append: char *(char *, const char *, va_list) +talloc_vasprintf_append_buffer: char *(char *, const char *, va_list) +talloc_version_major: int (void) +talloc_version_minor: int (void) diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..ff8e585 --- /dev/null +++ b/Makefile @@ -0,0 +1,65 @@ +# simple makefile wrapper to run waf + +WAF_BIN=`PATH=buildtools/bin:../../buildtools/bin:$$PATH which waf` +WAF_BINARY=$(PYTHON) $(WAF_BIN) +WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY) + +all: + $(WAF) build + +install: + $(WAF) install + +uninstall: + $(WAF) uninstall + +test: + $(WAF) test $(TEST_OPTIONS) + +testenv: + $(WAF) test --testenv $(TEST_OPTIONS) + +quicktest: + $(WAF) test --quick $(TEST_OPTIONS) + +dist: + touch .tmplock + WAFLOCK=.tmplock $(WAF) dist + +distcheck: + touch .tmplock + WAFLOCK=.tmplock $(WAF) distcheck + +clean: + $(WAF) clean + +distclean: + $(WAF) distclean + +reconfigure: configure + $(WAF) reconfigure + +show_waf_options: + $(WAF) --help + +# some compatibility make targets +everything: all + +testsuite: all + +check: test + +torture: all + +# this should do an install as well, once install is finished +installcheck: test + +etags: + $(WAF) etags + +ctags: + $(WAF) ctags + +bin/%:: FORCE + $(WAF) 
--targets=`basename $@` +FORCE: @@ -0,0 +1,13 @@ +1.0.1 26 May 2007 + + BUGS + + * Set name correctly when using talloc_append_string() (metze) + + LICENSE + + * Change license of files in lib/replace to LGPL (was GPL). (jelmer) + +1.0.0 30 April 2007 + + Initial release. diff --git a/buildtools/README b/buildtools/README new file mode 100644 index 0000000..eab0382 --- /dev/null +++ b/buildtools/README @@ -0,0 +1,12 @@ +See http://code.google.com/p/waf/ for more information on waf + +You can get a svn copy of the upstream source with: + + svn checkout http://waf.googlecode.com/svn/trunk/ waf-read-only + +Samba currently uses waf 1.5, which can be found at: + + http://waf.googlecode.com/svn/branches/waf-1.5 + +To update the current copy of waf, use the update-waf.sh script in this +directory. diff --git a/buildtools/bin/waf b/buildtools/bin/waf new file mode 100755 index 0000000..0f70fa2 --- /dev/null +++ b/buildtools/bin/waf @@ -0,0 +1,166 @@ +#!/usr/bin/env python3 +# encoding: latin-1 +# Thomas Nagy, 2005-2018 +# +""" +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE.
+""" + +import os, sys, inspect + +VERSION="2.0.26" +REVISION="x" +GIT="x" +INSTALL="x" +C1='x' +C2='x' +C3='x' +cwd = os.getcwd() +join = os.path.join + +if sys.hexversion<0x206000f: + raise ImportError('Python >= 2.6 is required to create the waf file') + +WAF='waf' +def b(x): + return x +if sys.hexversion>0x300000f: + WAF='waf3' + def b(x): + return x.encode() + +def err(m): + print(('\033[91mError: %s\033[0m' % m)) + sys.exit(1) + +def unpack_wafdir(dir, src): + f = open(src,'rb') + c = 'corrupt archive (%d)' + while 1: + line = f.readline() + if not line: err('run waf-light from a folder containing waflib') + if line == b('#==>\n'): + txt = f.readline() + if not txt: err(c % 1) + if f.readline() != b('#<==\n'): err(c % 2) + break + if not txt: err(c % 3) + txt = txt[1:-1].replace(b(C1), b('\n')).replace(b(C2), b('\r')).replace(b(C3), b('\x00')) + + import shutil, tarfile + try: shutil.rmtree(dir) + except OSError: pass + try: + for x in ('Tools', 'extras'): + os.makedirs(join(dir, 'waflib', x)) + except OSError: + err("Cannot unpack waf lib into %s\nMove waf in a writable directory" % dir) + + os.chdir(dir) + tmp = 't.bz2' + t = open(tmp,'wb') + try: t.write(txt) + finally: t.close() + + try: + t = tarfile.open(tmp) + except: + try: + os.system('bunzip2 t.bz2') + t = tarfile.open('t') + tmp = 't' + except: + os.chdir(cwd) + try: shutil.rmtree(dir) + except OSError: pass + err("Waf cannot be unpacked, check that bzip2 support is present") + + try: + for x in t: t.extract(x) + finally: + t.close() + + for x in ('Tools', 'extras'): + os.chmod(join('waflib',x), 493) + + if sys.hexversion<0x300000f: + sys.path = [join(dir, 'waflib')] + sys.path + import fixpy2 + fixpy2.fixdir(dir) + + os.remove(tmp) + os.chdir(cwd) + + try: dir = unicode(dir, 'mbcs') + except: pass + try: + from ctypes import windll + windll.kernel32.SetFileAttributesW(dir, 2) + except: + pass + +def test(dir): + try: + os.stat(join(dir, 'waflib')) + return os.path.abspath(dir) + except OSError: + pass + +def find_lib(): + path = '../../third_party/waf' + paths = [path, path+'/waflib'] + return [os.path.abspath(os.path.join(os.path.dirname(__file__), x)) for x in paths] + +wafdir = find_lib() +for p in wafdir: + sys.path.insert(0, p) + +if __name__ == '__main__': + #import extras.compat15#PRELUDE + import sys + + from waflib.Tools import ccroot, c, ar, compiler_c, gcc + sys.modules['cc'] = c + sys.modules['ccroot'] = ccroot + sys.modules['ar'] = ar + sys.modules['compiler_cc'] = compiler_c + sys.modules['gcc'] = gcc + + from waflib import Options + Options.lockfile = os.environ.get('WAFLOCK', '.lock-wscript') + if os.path.isfile(Options.lockfile) and os.stat(Options.lockfile).st_size == 0: + os.environ['NOCLIMB'] = "1" + # there is a single top-level, but libraries must build independently + os.environ['NO_LOCK_IN_TOP'] = "1" + + from waflib import Task + class o(object): + display = None + Task.classes['cc_link'] = o + + from waflib import Scripting + Scripting.waf_entry_point(cwd, VERSION, wafdir[0]) diff --git a/buildtools/compare_config_h4.sh b/buildtools/compare_config_h4.sh new file mode 100755 index 0000000..fee8abf --- /dev/null +++ b/buildtools/compare_config_h4.sh @@ -0,0 +1,12 @@ +#!/bin/sh + +# compare the generated config.h from a waf build with existing samba +# build + +grep "^.define" bin/default/source4/include/config.h | sort >waf-config.h +grep "^.define" $HOME/samba_old/source4/include/config.h | sort >old-config.h + +comm -23 old-config.h waf-config.h + +#echo +#diff -u old-config.h waf-config.h diff 
--git a/buildtools/compare_generated.sh b/buildtools/compare_generated.sh new file mode 100755 index 0000000..e626579 --- /dev/null +++ b/buildtools/compare_generated.sh @@ -0,0 +1,49 @@ +#!/bin/sh + +# compare the generated files from a waf build + +old_build=$HOME/samba_old + +gen_files=$(cd bin/default && find . -type f -name '*.[ch]') + +strip_file() +{ + in_file=$1 + out_file=$2 + cat $in_file | + grep -v 'The following definitions come from' | + grep -v 'Automatically generated at' | + grep -v 'Generated from' | + sed 's|/home/tnagy/samba/source4||g' | + sed 's|/home/tnagy/samba/|../|g' | + sed 's|bin/default/source4/||g' | + sed 's|bin/default/|../|g' | + sed 's/define _____/define ___/g' | + sed 's/define __*/define _/g' | + sed 's/define _DEFAULT_/define _/g' | + sed 's/define _SOURCE4_/define ___/g' | + sed 's/define ___/define _/g' | + sed 's/ifndef ___/ifndef _/g' | + sed 's|endif /* ____|endif /* __|g' | + sed s/__DEFAULT_SOURCE4/__/ | + sed s/__DEFAULT_SOURCE4/__/ | + sed s/__DEFAULT/____/ >$out_file +} + +compare_file() +{ + f=$1 + bname=$(basename $f) + t1=/tmp/$bname.old.$$ + t2=/tmp/$bname.new.$$ + strip_file $old_build/$f $t1 + strip_file bin/default/$f $t2 + diff -u -b $t1 $t2 2>&1 + rm -f $t1 $t2 +} + +for f in $gen_files; do + compare_file $f +done diff --git a/buildtools/compare_install.sh b/buildtools/compare_install.sh new file mode 100755 index 0000000..37772a4 --- /dev/null +++ b/buildtools/compare_install.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +prefix1="$1" +prefix2="$2" + +(cd $prefix1 && find .) | sort >p1.txt +(cd $prefix2 && find .) | sort >p2.txt +diff -u p[12].txt diff --git a/buildtools/devel_env.sh b/buildtools/devel_env.sh new file mode 100644 index 0000000..9f87a4a --- /dev/null +++ b/buildtools/devel_env.sh @@ -0,0 +1,7 @@ +# This file can be sourced using +# +# source buildtools/devel_env.sh + +# Setup python path for lsp server +PYTHONPATH="$(pwd)/third_party/waf:$(pwd)/python:$(pwd)/bin/python:$(pwd)/selftest:${PYTHONPATH}" +export PYTHONPATH diff --git a/buildtools/examples/run_on_target.py b/buildtools/examples/run_on_target.py new file mode 100755 index 0000000..0c9ac5a --- /dev/null +++ b/buildtools/examples/run_on_target.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 + +# +# Sample run-on-target script +# This is a script that can be used as the cross-execute parameter to the samba +# configuration process, running the command on a remote target for which +# the cross-compiled configure test was compiled. +# +# To use: +# ./configure \ +# --cross-compile \ +# '--cross-execute=./buildtools/example/run_on_target.py --host=<host>' +# +# A more elaborate example: +# ./configure \ +# --cross-compile \ +# '--cross-execute=./buildtools/example/run_on_target.py --host=<host> --user=<user> "--ssh=ssh -i <some key file>" --destdir=/path/to/dir' +# +# Typically this is to be used also with --cross-answers, so that the +# cross answers file gets built and further builds can be made without +# the help of a remote target. +# +# The following assumptions are made: +# 1. rsync is available on build machine and target machine +# 2. A running ssh service on target machine with password-less shell login +# 3. A directory writable by the password-less login user +# 4. The tests on the target can run and provide reliable results +# from the login account's home directory. This is significant +# for example in locking tests which +# create files in the current directory.
As a workaround to this +# assumption, the TESTDIR environment variable can be set on the target +# (using ssh command line or server config) and the tests shall +# chdir to that directory. +# + +import sys +import os +import subprocess +from optparse import OptionParser + +# these are defaults, but can be overridden using command line +SSH = 'ssh' +USER = None +HOST = 'localhost' + + +def xfer_files(ssh, srcdir, host, user, targ_destdir): + """Transfer executable files to target + + Use rsync to copy the directory containing program to run + INTO a destination directory on the target. An exact copy + of the source directory is created on the target machine, + possibly deleting files on the target machine which do not + exist on the source directory. + + The idea is that the test may include files in addition to + the compiled binary, and all of those files reside alongside + the binary in a source directory. + + For example, if the test to run is /foo/bar/test and the + destination directory on the target is /tbaz, then /tbaz/bar + on the target shall be an exact copy of /foo/bar on the source, + including deletion of files inside /tbaz/bar which do not exist + on the source. + """ + + userhost = host + if user: + userhost = '%s@%s' % (user, host) + + cmd = 'rsync --verbose -rl --ignore-times --delete -e "%s" %s %s:%s/' % \ + (ssh, srcdir, userhost, targ_destdir) + p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (out, err) = p.communicate() + if p.returncode != 0: + raise Exception('failed syncing files\n stdout:\n%s\nstderr:%s\n' + % (out, err)) + + +def exec_remote(ssh, host, user, destdir, targdir, prog, args): + """Run a test on the target + + Using password-less ssh, run the compiled binary on the target. + + An assumption is that there's no need to cd into the target dir, + same as there's no need to do it on a native build. + """ + userhost = host + if user: + userhost = '%s@%s' % (user, host) + + cmd = '%s %s %s/%s/%s' % (ssh, userhost, destdir, targdir, prog) + if args: + cmd = cmd + ' ' + ' '.join(args) + p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + (out, err) = p.communicate() + return (p.returncode, out) + + +def main(argv): + usage = "usage: %prog [options] <prog> [args]" + parser = OptionParser(usage) + + parser.add_option('--ssh', help="SSH client and additional flags", + default=SSH) + parser.add_option('--host', help="target host name or IP address", + default=HOST) + parser.add_option('--user', help="login user on target", + default=USER) + parser.add_option('--destdir', help="work directory on target", + default='~') + + (options, args) = parser.parse_args(argv) + if len(args) < 1: + parser.error("please supply test program to run") + + progpath = args[0] + + # assume that a test that was not compiled fails (e.g. 
getconf) + if progpath[0] != '/': + return (1, "") + + progdir = os.path.dirname(progpath) + prog = os.path.basename(progpath) + targ_progdir = os.path.basename(progdir) + + xfer_files( + options.ssh, + progdir, + options.host, + options.user, + options.destdir) + + (rc, out) = exec_remote(options.ssh, + options.host, + options.user, + options.destdir, + targ_progdir, + prog, args[1:]) + return (rc, out) + + +if __name__ == '__main__': + (rc, out) = main(sys.argv[1:]) + sys.stdout.write(out) + sys.exit(rc) diff --git a/buildtools/scripts/Makefile.waf b/buildtools/scripts/Makefile.waf new file mode 100644 index 0000000..a15a5f8 --- /dev/null +++ b/buildtools/scripts/Makefile.waf @@ -0,0 +1,72 @@ +# simple makefile wrapper to run waf + +WAF_BINARY=$(PYTHON) BUILDTOOLS/bin/waf +WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY) + +all: + $(WAF) build + +install: + $(WAF) install + +uninstall: + $(WAF) uninstall + +test: + $(WAF) test $(TEST_OPTIONS) + +help: + @echo NOTE: to run extended waf options use $(WAF_BINARY) or modify your PATH + $(WAF) --help + +testenv: + $(WAF) test --testenv $(TEST_OPTIONS) + +quicktest: + $(WAF) test --quick $(TEST_OPTIONS) + +dist: + $(WAF) dist + +distcheck: + $(WAF) distcheck + +clean: + $(WAF) clean + +distclean: + $(WAF) distclean + +reconfigure: configure + $(WAF) reconfigure + +show_waf_options: + $(WAF) --help + +# some compatibility make targets +everything: all + +testsuite: all + +check: test + +torture: all + +# this should do an install as well, once install is finished +installcheck: test + +etags: + $(WAF) etags + +ctags: + $(WAF) ctags + +bin/%:: FORCE + $(WAF) --targets=$@ +FORCE: + +configure: autogen-waf.sh BUILDTOOLS/scripts/configure.waf + ./autogen-waf.sh + +Makefile: autogen-waf.sh configure BUILDTOOLS/scripts/Makefile.waf + ./autogen-waf.sh diff --git a/buildtools/scripts/abi_gen.sh b/buildtools/scripts/abi_gen.sh new file mode 100755 index 0000000..c66a1b8 --- /dev/null +++ b/buildtools/scripts/abi_gen.sh @@ -0,0 +1,26 @@ +#!/bin/sh +# generate a set of ABI signatures from a shared library + +SHAREDLIB="$1" + +GDBSCRIPT="gdb_syms.$$" + +( + cat <<EOF +set height 0 +set width 0 +EOF + + # On older linker versions _init|_fini symbols are not hidden. + objdump --dynamic-syms "${SHAREDLIB}" | + awk '$0 !~ /.hidden/ {if ($2 == "g" && $3 ~ /D(F|O)/ && $4 ~ /(.bss|.rodata|.text)/) print $NF}' | + sort | + while read -r s; do + echo "echo $s: " + echo p "${s}" + done +) >$GDBSCRIPT + +# forcing the terminal avoids a problem on Fedora12 +TERM=none gdb -n -batch -x $GDBSCRIPT "$SHAREDLIB" </dev/null +rm -f $GDBSCRIPT diff --git a/buildtools/scripts/autogen-waf.sh b/buildtools/scripts/autogen-waf.sh new file mode 100755 index 0000000..a0ed80c --- /dev/null +++ b/buildtools/scripts/autogen-waf.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +p=$(dirname $0) + +echo "Setting up for waf build" + +echo "Looking for the buildtools directory" + +d="buildtools" +while test \! -d "$p/$d"; do d="../$d"; done + +echo "Found buildtools in $p/$d" + +echo "Setting up configure" +rm -f $p/configure $p/include/config*.h* +sed "s|BUILDTOOLS|$d|g;s|BUILDPATH|$p|g" <"$p/$d/scripts/configure.waf" >$p/configure +chmod +x $p/configure + +echo "Setting up Makefile" +rm -f $p/makefile $p/Makefile +sed "s|BUILDTOOLS|$d|g" <"$p/$d/scripts/Makefile.waf" >$p/Makefile + +echo "done. Now run $p/configure or $p/configure.developer then make." +if [ $p != "." ]; then + echo "Notice: The build invoke path is not 'source4'! Use make with the parameter" + echo "-C <'source4' path>. 
Example: make -C source4 all" +fi diff --git a/buildtools/scripts/configure.waf b/buildtools/scripts/configure.waf new file mode 100755 index 0000000..ccb6284 --- /dev/null +++ b/buildtools/scripts/configure.waf @@ -0,0 +1,21 @@ +#!/bin/sh + +PREVPATH=$(dirname $0) + +WAF=BUILDTOOLS/bin/waf + +# using JOBS=1 gives maximum compatibility with +# systems like AIX which have broken threading in python +JOBS=1 +export JOBS + +# Make sure we don't have any library preloaded. +unset LD_PRELOAD + +# Make sure we get stable hashes +PYTHONHASHSEED=1 +export PYTHONHASHSEED + +cd BUILDPATH || exit 1 +$PYTHON $WAF configure "$@" || exit 1 +cd $PREVPATH diff --git a/buildtools/wafsamba/README b/buildtools/wafsamba/README new file mode 100644 index 0000000..1968b55 --- /dev/null +++ b/buildtools/wafsamba/README @@ -0,0 +1,8 @@ +This is a set of waf 'tools' to help make building the Samba +components easier, by having common functions in one place. This gives +us a more consistent build, and ensures that our project rules are +obeyed + + +TODO: + see http://wiki.samba.org/index.php/Waf diff --git a/buildtools/wafsamba/__init__.py b/buildtools/wafsamba/__init__.py new file mode 100644 index 0000000..e69de29 --- /dev/null +++ b/buildtools/wafsamba/__init__.py diff --git a/buildtools/wafsamba/configure_file.py b/buildtools/wafsamba/configure_file.py new file mode 100644 index 0000000..98a58a4 --- /dev/null +++ b/buildtools/wafsamba/configure_file.py @@ -0,0 +1,41 @@ +# handle substitution of variables in .in files + +import sys +import re +import os +from waflib import Build, Logs +from samba_utils import SUBST_VARS_RECURSIVE + +def subst_at_vars(task): + '''substitute @VAR@ style variables in a file''' + + env = task.env + s = task.inputs[0].read() + + # split on the vars + a = re.split(r'(@\w+@)', s) + out = [] + for v in a: + if re.match(r'@\w+@', v): + vname = v[1:-1] + if not vname in task.env and vname.upper() in task.env: + vname = vname.upper() + if not vname in task.env: + Logs.error("Unknown substitution %s in %s" % (v, task.name)) + sys.exit(1) + v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) + out.append(v) + contents = ''.join(out) + task.outputs[0].write(contents) + return 0 + +def CONFIGURE_FILE(bld, in_file, **kwargs): + '''configure file''' + + base=os.path.basename(in_file) + t = bld.SAMBA_GENERATOR('INFILE_%s' % base, + rule = subst_at_vars, + source = in_file + '.in', + target = in_file, + vars = kwargs) +Build.BuildContext.CONFIGURE_FILE = CONFIGURE_FILE diff --git a/buildtools/wafsamba/generic_cc.py b/buildtools/wafsamba/generic_cc.py new file mode 100644 index 0000000..1352c54 --- /dev/null +++ b/buildtools/wafsamba/generic_cc.py @@ -0,0 +1,70 @@ + +# compiler definition for a generic C compiler +# based on suncc.py from waf + +import os, optparse +from waflib import Errors +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +# +# Let waflib provide useful defaults, but +# provide generic_cc as last resort fallback on +# all platforms +# +from waflib.Tools.compiler_c import c_compiler +for key in c_compiler.keys(): + c_compiler[key].append('generic_cc') + +@conf +def find_generic_cc(conf): + v = conf.env + cc = None + if v.CC: + cc = v.CC + elif 'CC' in conf.environ: + cc = conf.environ['CC'] + if not cc: + cc = conf.find_program('cc', var='CC') + if not cc: + conf.fatal('generic_cc was not found') + + try: + conf.cmd_and_log(cc + ['--version']) + except Errors.WafError: + conf.fatal('%r --version could not be executed' % cc) + + v.CC = cc + v.CC_NAME =
'generic_cc' + +@conf +def generic_cc_common_flags(conf): + v = conf.env + + v.CC_SRC_F = '' + v.CC_TGT_F = ['-c', '-o'] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + if not v.LINK_CC: + v.LINK_CC = v.CC + + v.CCLNK_SRC_F = '' + v.CCLNK_TGT_F = ['-o'] + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + + v.cprogram_PATTERN = '%s' + v.cshlib_PATTERN = 'lib%s.so' + v.cstlib_PATTERN = 'lib%s.a' + +def configure(conf): + conf.find_generic_cc() + conf.find_ar() + conf.generic_cc_common_flags() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() diff --git a/buildtools/wafsamba/pkgconfig.py b/buildtools/wafsamba/pkgconfig.py new file mode 100644 index 0000000..417bf03 --- /dev/null +++ b/buildtools/wafsamba/pkgconfig.py @@ -0,0 +1,68 @@ +# handle substitution of variables in pc files + +import os, re, sys +from waflib import Build, Logs +from samba_utils import SUBST_VARS_RECURSIVE, TO_LIST + +def subst_at_vars(task): + '''substitute @VAR@ style variables in a file''' + + s = task.inputs[0].read() + # split on the vars + a = re.split(r'(@\w+@)', s) + out = [] + done_var = {} + back_sub = [ ('PREFIX', '${prefix}'), ('EXEC_PREFIX', '${exec_prefix}')] + for v in a: + if re.match(r'@\w+@', v): + vname = v[1:-1] + if not vname in task.env and vname.upper() in task.env: + vname = vname.upper() + if not vname in task.env: + Logs.error("Unknown substitution %s in %s" % (v, task.name)) + sys.exit(1) + v = SUBST_VARS_RECURSIVE(task.env[vname], task.env) + # now we back substitute the allowed pc vars + for (b, m) in back_sub: + s = task.env[b] + if s == v[0:len(s)]: + if not b in done_var: + # we don't want to substitute the first usage + done_var[b] = True + else: + v = m + v[len(s):] + break + out.append(v) + contents = ''.join(out) + task.outputs[0].write(contents) + return 0 + + +def PKG_CONFIG_FILES(bld, pc_files, vnum=None, extra_name=None): + '''install some pkg_config pc files''' + dest = '${PKGCONFIGDIR}' + dest = bld.EXPAND_VARIABLES(dest) + for f in TO_LIST(pc_files): + if extra_name: + target = f.split('.pc')[0] + extra_name + ".pc" + else: + target = f + base=os.path.basename(target) + t = bld.SAMBA_GENERATOR('PKGCONFIG_%s' % base, + rule=subst_at_vars, + source=f+'.in', + target=target) + bld.add_manual_dependency(bld.path.find_or_declare(f), bld.env['PREFIX'].encode('utf8')) + t.vars = [] + if t.env.RPATH_ON_INSTALL: + t.env.LIB_RPATH = t.env.RPATH_ST % t.env.LIBDIR + else: + t.env.LIB_RPATH = '' + if vnum: + t.env.PACKAGE_VERSION = vnum + for v in [ 'PREFIX', 'EXEC_PREFIX', 'LIB_RPATH' ]: + t.vars.append(t.env[v]) + bld.INSTALL_FILES(dest, target, flat=True, destname=base) +Build.BuildContext.PKG_CONFIG_FILES = PKG_CONFIG_FILES + + diff --git a/buildtools/wafsamba/samba3.py b/buildtools/wafsamba/samba3.py new file mode 100644 index 0000000..227ee27 --- /dev/null +++ b/buildtools/wafsamba/samba3.py @@ -0,0 +1,111 @@ +# a waf tool to add autoconf-like macros to the configure section +# and for SAMBA_ macros for building libraries, binaries etc + +import os +from waflib import Build +from samba_utils import TO_LIST +from samba_autoconf import library_flags + +def SAMBA3_IS_STATIC_MODULE(bld, module): + '''Check whether module is in static list''' + if module in bld.env['static_modules']: + return True + return False +Build.BuildContext.SAMBA3_IS_STATIC_MODULE = SAMBA3_IS_STATIC_MODULE + +def SAMBA3_IS_SHARED_MODULE(bld, module): + '''Check whether module is in shared 
list''' + if module in bld.env['shared_modules']: + return True + return False +Build.BuildContext.SAMBA3_IS_SHARED_MODULE = SAMBA3_IS_SHARED_MODULE + +def SAMBA3_IS_ENABLED_MODULE(bld, module): + '''Check whether module is in either shared or static list ''' + return SAMBA3_IS_STATIC_MODULE(bld, module) or SAMBA3_IS_SHARED_MODULE(bld, module) +Build.BuildContext.SAMBA3_IS_ENABLED_MODULE = SAMBA3_IS_ENABLED_MODULE + + + +def s3_fix_kwargs(bld, kwargs): + '''fix the build arguments for s3 build rules to include the + necessary includes, subdir and cflags options ''' + s3dir = os.path.join(bld.env.srcdir, 'source3') + s3reldir = os.path.relpath(s3dir, bld.path.abspath()) + + # the extra_includes list is relative to the source3 directory + extra_includes = [ '.', 'include', 'lib' ] + # local heimdal paths must only be included when using our embedded Heimdal + if bld.CONFIG_SET("USING_EMBEDDED_HEIMDAL"): + extra_includes += [ '../third_party/heimdal/lib/com_err', + '../third_party/heimdal/lib/base', + '../third_party/heimdal/lib/krb5', + '../third_party/heimdal/lib/gssapi/gssapi', + '../third_party/heimdal_build/include', + '../bin/default/third_party/heimdal/lib/asn1' ] + + if bld.CONFIG_SET('USING_SYSTEM_TDB'): + (tdb_includes, tdb_ldflags, tdb_cpppath) = library_flags(bld, 'tdb') + extra_includes += tdb_cpppath + else: + extra_includes += [ '../lib/tdb/include' ] + + if bld.CONFIG_SET('USING_SYSTEM_TEVENT'): + (tevent_includes, tevent_ldflags, tevent_cpppath) = library_flags(bld, 'tevent') + extra_includes += tevent_cpppath + else: + extra_includes += [ '../lib/tevent' ] + + if bld.CONFIG_SET('USING_SYSTEM_TALLOC'): + (talloc_includes, talloc_ldflags, talloc_cpppath) = library_flags(bld, 'talloc') + extra_includes += talloc_cpppath + else: + extra_includes += [ '../lib/talloc' ] + + if bld.CONFIG_SET('USING_SYSTEM_POPT'): + (popt_includes, popt_ldflags, popt_cpppath) = library_flags(bld, 'popt') + extra_includes += popt_cpppath + else: + extra_includes += [ '../lib/popt' ] + + # s3 builds assume that they will have a bunch of extra include paths + includes = [] + for d in extra_includes: + includes += [ os.path.join(s3reldir, d) ] + + # the rule may already have some includes listed + if 'includes' in kwargs: + includes += TO_LIST(kwargs['includes']) + kwargs['includes'] = includes + +# these wrappers allow for mixing of S3 and S4 build rules in the one build + +def SAMBA3_LIBRARY(bld, name, *args, **kwargs): + s3_fix_kwargs(bld, kwargs) + return bld.SAMBA_LIBRARY(name, *args, **kwargs) +Build.BuildContext.SAMBA3_LIBRARY = SAMBA3_LIBRARY + +def SAMBA3_PLUGIN(bld, name, *args, **kwargs): + s3_fix_kwargs(bld, kwargs) + return bld.SAMBA_PLUGIN(name, *args, **kwargs) +Build.BuildContext.SAMBA3_PLUGIN = SAMBA3_PLUGIN + +def SAMBA3_MODULE(bld, name, *args, **kwargs): + s3_fix_kwargs(bld, kwargs) + return bld.SAMBA_MODULE(name, *args, **kwargs) +Build.BuildContext.SAMBA3_MODULE = SAMBA3_MODULE + +def SAMBA3_SUBSYSTEM(bld, name, *args, **kwargs): + s3_fix_kwargs(bld, kwargs) + return bld.SAMBA_SUBSYSTEM(name, *args, **kwargs) +Build.BuildContext.SAMBA3_SUBSYSTEM = SAMBA3_SUBSYSTEM + +def SAMBA3_BINARY(bld, name, *args, **kwargs): + s3_fix_kwargs(bld, kwargs) + return bld.SAMBA_BINARY(name, *args, **kwargs) +Build.BuildContext.SAMBA3_BINARY = SAMBA3_BINARY + +def SAMBA3_PYTHON(bld, name, *args, **kwargs): + s3_fix_kwargs(bld, kwargs) + return bld.SAMBA_PYTHON(name, *args, **kwargs) +Build.BuildContext.SAMBA3_PYTHON = SAMBA3_PYTHON diff --git a/buildtools/wafsamba/samba_abi.py 
b/buildtools/wafsamba/samba_abi.py new file mode 100644 index 0000000..2d9505d --- /dev/null +++ b/buildtools/wafsamba/samba_abi.py @@ -0,0 +1,334 @@ +# functions for handling ABI checking of libraries + +import os +import sys +import re +import fnmatch + +from waflib import Options, Utils, Logs, Task, Build, Errors +from waflib.TaskGen import feature, before, after +from wafsamba import samba_utils + +# these type maps cope with platform specific names for common types +# please add new type mappings into the list below +abi_type_maps = { + '_Bool' : 'bool', + 'struct __va_list_tag *' : 'va_list' + } + +version_key = lambda x: list(map(int, x.split("."))) + +def normalise_signature(sig): + '''normalise a signature from gdb''' + sig = sig.strip() + sig = re.sub(r'^\$[0-9]+\s=\s\{(.+)\}$', r'\1', sig) + sig = re.sub(r'^\$[0-9]+\s=\s\{(.+)\}(\s0x[0-9a-f]+\s<\w+>)+$', r'\1', sig) + sig = re.sub(r'^\$[0-9]+\s=\s(0x[0-9a-f]+)\s?(<\w+>)?$', r'\1', sig) + sig = re.sub(r'0x[0-9a-f]+', '0xXXXX', sig) + sig = re.sub('", <incomplete sequence (\\\\[a-z0-9]+)>', r'\1"', sig) + + for t in abi_type_maps: + # we need to cope with non-word characters in mapped types + m = t + m = m.replace('*', r'\*') + if m[-1].isalnum() or m[-1] == '_': + m += '\\b' + if m[0].isalnum() or m[0] == '_': + m = '\\b' + m + sig = re.sub(m, abi_type_maps[t], sig) + return sig + + +def normalise_varargs(sig): + '''cope with older versions of gdb''' + sig = re.sub(r',\s\.\.\.', '', sig) + # Make sure we compare bytes and not strings + return bytes(sig, encoding='utf-8').decode('unicode_escape') + + +def parse_sigs(sigs, abi_match): + '''parse ABI signatures file''' + abi_match = samba_utils.TO_LIST(abi_match) + ret = {} + a = sigs.split('\n') + for s in a: + if s.find(':') == -1: + continue + sa = s.split(':') + if abi_match: + matched = False + negative = False + for p in abi_match: + if p[0] == '!' 
and fnmatch.fnmatch(sa[0], p[1:]): + negative = True + break + elif fnmatch.fnmatch(sa[0], p): + matched = True + break + if (not matched) and negative: + continue + Logs.debug("%s -> %s" % (sa[1], normalise_signature(sa[1]))) + ret[sa[0]] = normalise_signature(sa[1]) + return ret + +def save_sigs(sig_file, parsed_sigs): + '''save ABI signatures to a file''' + sigs = "".join('%s: %s\n' % (s, parsed_sigs[s]) for s in sorted(parsed_sigs.keys())) + return samba_utils.save_file(sig_file, sigs, create_dir=True) + + +def abi_check_task(self): + '''check if the ABI has changed''' + abi_gen = self.ABI_GEN + + libpath = self.inputs[0].abspath(self.env) + libname = os.path.basename(libpath) + + sigs = samba_utils.get_string(Utils.cmd_output([abi_gen, libpath])) + parsed_sigs = parse_sigs(sigs, self.ABI_MATCH) + + sig_file = self.ABI_FILE + + old_sigs = samba_utils.load_file(sig_file) + if old_sigs is None or Options.options.ABI_UPDATE: + if not save_sigs(sig_file, parsed_sigs): + raise Errors.WafError('Failed to save ABI file "%s"' % sig_file) + Logs.warn('Generated ABI signatures %s' % sig_file) + return + + parsed_old_sigs = parse_sigs(old_sigs, self.ABI_MATCH) + + # check all old sigs + got_error = False + for s in parsed_old_sigs: + if not s in parsed_sigs: + Logs.error('%s: symbol %s has been removed - please update major version\n\tsignature: %s' % ( + libname, s, parsed_old_sigs[s])) + got_error = True + elif normalise_varargs(parsed_old_sigs[s]) != normalise_varargs(parsed_sigs[s]): + Logs.error('%s: symbol %s has changed - please update major version\n\told_signature: %s\n\tnew_signature: %s' % ( + libname, s, parsed_old_sigs[s], parsed_sigs[s])) + got_error = True + + for s in parsed_sigs: + if not s in parsed_old_sigs: + Logs.error('%s: symbol %s has been added - please mark it _PRIVATE_ or update minor version\n\tsignature: %s' % ( + libname, s, parsed_sigs[s])) + got_error = True + + if got_error: + raise Errors.WafError('ABI for %s has changed - please fix library version then build with --abi-update\nSee http://wiki.samba.org/index.php/Waf#ABI_Checking for more information\nIf you have not changed any ABI, and your platform always gives this error, please configure with --abi-check-disable to skip this check' % libname) + + +t = Task.task_factory('abi_check', abi_check_task, color='BLUE', ext_in='.bin') +t.quiet = True +# allow "waf --abi-check" to force re-checking the ABI +if '--abi-check' in sys.argv: + t.always_run = True + +@after('apply_link') +@feature('abi_check') +def abi_check(self): + '''check that ABI matches saved signatures''' + env = self.bld.env + if not env.ABI_CHECK or self.abi_directory is None: + return + + # if the platform doesn't support -fvisibility=hidden then the ABI + # checks become fairly meaningless + if not env.HAVE_VISIBILITY_ATTR: + return + + topsrc = self.bld.srcnode.abspath() + abi_gen = os.path.join(topsrc, 'buildtools/scripts/abi_gen.sh') + + abi_file = "%s/%s-%s.sigs" % (self.abi_directory, self.version_libname, + self.abi_vnum) + + tsk = self.create_task('abi_check', self.link_task.outputs[0]) + tsk.ABI_FILE = abi_file + tsk.ABI_MATCH = self.abi_match + tsk.ABI_GEN = abi_gen + + +def abi_process_file(fname, version, symmap): + '''process one ABI file, adding new symbols to the symmap''' + for line in Utils.readf(fname).splitlines(): + symname = line.split(":")[0] + if not symname in symmap: + symmap[symname] = version + +def version_script_map_process_file(fname, version, abi_match): + '''process one standard version_script file, adding the 
symbols to the + abi_match''' + in_section = False + in_global = False + in_local = False + for _line in Utils.readf(fname).splitlines(): + line = _line.strip() + if line == "": + continue + if line.startswith("#"): + continue + if line.endswith(" {"): + in_section = True + continue + if line == "};": + assert in_section + in_section = False + in_global = False + in_local = False + continue + if not in_section: + continue + if line == "global:": + in_global = True + in_local = False + continue + if line == "local:": + in_global = False + in_local = True + continue + + symname = line.split(";")[0] + assert symname != "" + if in_local: + if symname == "*": + continue + symname = "!%s" % symname + if not symname in abi_match: + abi_match.append(symname) +
+def abi_write_vscript(f, libname, current_version, versions, symmap, abi_match): + """Write a vscript file for a library in --version-script format. + + :param f: File-like object to write to + :param libname: Name of the library, uppercased + :param current_version: Current version + :param versions: Versions to consider + :param symmap: Dictionary mapping symbols -> version + :param abi_match: List of symbols considered to be public in the current + version + """ + + invmap = {} + for s in symmap: + invmap.setdefault(symmap[s], []).append(s) + + last_key = "" + versions = sorted(versions, key=version_key) + for k in versions: + symver = "%s_%s" % (libname, k) + if symver == current_version: + break + f.write("%s {\n" % symver) + if k in sorted(invmap.keys()): + f.write("\tglobal:\n") + for s in invmap.get(k, []): + f.write("\t\t%s;\n" % s) + f.write("}%s;\n\n" % last_key) + last_key = " %s" % symver + f.write("%s {\n" % current_version) + local_abi = list(filter(lambda x: x[0] == '!', abi_match)) + global_abi = list(filter(lambda x: x[0] != '!', abi_match)) + f.write("\tglobal:\n") + if len(global_abi) > 0: + for x in global_abi: + f.write("\t\t%s;\n" % x) + else: + f.write("\t\t*;\n") + # Always hide symbols that must be local if they exist + local_abi.extend(["!_end", "!__bss_start", "!_edata"]) + f.write("\tlocal:\n") + for x in local_abi: + f.write("\t\t%s;\n" % x[1:]) + if global_abi != ["*"]: + if len(global_abi) > 0: + f.write("\t\t*;\n") + f.write("};\n") + + +def abi_build_vscript(task): + '''generate a vscript file for our public libraries''' + + tgt = task.outputs[0].bldpath(task.env) + + symmap = {} + versions = [] + abi_match = list(task.env.ABI_MATCH) + for f in task.inputs: + fname = f.abspath(task.env) + basename = os.path.basename(fname) + if basename.endswith(".sigs"): + version = basename[len(task.env.LIBNAME)+1:-len(".sigs")] + versions.append(version) + abi_process_file(fname, version, symmap) + continue + if basename == "version-script.map": + version_script_map_process_file(fname, task.env.VERSION, abi_match) + continue + raise Errors.WafError('Unsupported input "%s"' % fname) + if task.env.PRIVATE_LIBRARY: + # For private libraries we need to inject + # each public symbol explicitly into the + # abi match array and remove all explicit + # versioning so that each exported symbol + # is tagged with the private library tag.
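+ # (illustrative sketch, hypothetical names: a private libfoo whose map
+ # file declares foo_init and foo_fini as global comes out as one
+ # version node, roughly
+ #     LIBFOO_1.2.3 { global: foo_init; foo_fini;
+ #                    local: _end; __bss_start; _edata; *; };
+ # so every exported symbol is tagged with that single private version)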
+ for s in symmap: + abi_match.append(s) + symmap = {} + versions = [] + f = open(tgt, mode='w') + try: + abi_write_vscript(f, task.env.LIBNAME, task.env.VERSION, versions, + symmap, abi_match) + finally: + f.close() + +def VSCRIPT_MAP_PRIVATE(bld, libname, orig_vscript, version, private_vscript): + version = version.replace("-", "_").replace("+","_").upper() + t = bld.SAMBA_GENERATOR(private_vscript, + rule=abi_build_vscript, + source=orig_vscript, + group='vscripts', + target=private_vscript) + t.env.ABI_MATCH = [] + t.env.VERSION = version + t.env.LIBNAME = libname + t.env.PRIVATE_LIBRARY = True + t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH', 'PRIVATE_LIBRARY'] +Build.BuildContext.VSCRIPT_MAP_PRIVATE = VSCRIPT_MAP_PRIVATE + +def ABI_VSCRIPT(bld, libname, abi_directory, version, vscript, abi_match=None, private_library=False): + '''generate a vscript file for our public libraries''' + if abi_directory: + source = bld.path.ant_glob('%s/%s-[0-9]*.sigs' % (abi_directory, libname), flat=True) + def abi_file_key(path): + return version_key(path[:-len(".sigs")].rsplit("-")[-1]) + source = sorted(source.split(), key=abi_file_key) + else: + source = '' + + if private_library is None: + private_library = False + + libname = os.path.basename(libname) + version = os.path.basename(version) + libname = libname.replace("-", "_").replace("+","_").upper() + version = version.replace("-", "_").replace("+","_").upper() + + t = bld.SAMBA_GENERATOR(vscript, + rule=abi_build_vscript, + source=source, + group='vscripts', + target=vscript) + if abi_match is None: + abi_match = ["*"] + else: + abi_match = samba_utils.TO_LIST(abi_match) + t.env.ABI_MATCH = abi_match + t.env.VERSION = version + t.env.LIBNAME = libname + t.env.PRIVATE_LIBRARY = private_library + t.vars = ['LIBNAME', 'VERSION', 'ABI_MATCH', 'PRIVATE_LIBRARY'] +Build.BuildContext.ABI_VSCRIPT = ABI_VSCRIPT diff --git a/buildtools/wafsamba/samba_autoconf.py b/buildtools/wafsamba/samba_autoconf.py new file mode 100644 index 0000000..7b383ea --- /dev/null +++ b/buildtools/wafsamba/samba_autoconf.py @@ -0,0 +1,1023 @@ +# a waf tool to add autoconf-like macros to the configure section + +import os, sys +from waflib import Build, Options, Logs, Context +from waflib.Configure import conf +from waflib.TaskGen import feature +from waflib.Tools import c_preproc as preproc +from samba_utils import TO_LIST, GET_TARGET_TYPE, SET_TARGET_TYPE, unique_list, mkdir_p + +missing_headers = set() + +#################################################### +# some autoconf like helpers, to make the transition +# to waf a bit easier for those used to autoconf +# m4 files + +@conf +def DEFINE(conf, d, v, add_to_cflags=False, quote=False): + '''define a config option''' + conf.define(d, v, quote=quote) + if add_to_cflags: + conf.env.append_value('CFLAGS', '-D%s=%s' % (d, str(v))) + +def hlist_to_string(conf, headers=None): + '''convert a headers list to a set of #include lines''' + hlist = conf.env.hlist + if headers: + hlist = hlist[:] + hlist.extend(TO_LIST(headers)) + hdrs = "\n".join('#include <%s>' % h for h in hlist) + + return hdrs + + +@conf +def COMPOUND_START(conf, msg): + '''start a compound test''' + def null_check_message_1(self,*k,**kw): + return + def null_check_message_2(self,*k,**kw): + return + + v = getattr(conf.env, 'in_compound', []) + if v != [] and v != 0: + conf.env.in_compound = v + 1 + return + conf.start_msg(msg) + conf.saved_check_message_1 = conf.start_msg + conf.start_msg = null_check_message_1 + conf.saved_check_message_2 = conf.end_msg + 
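# start_msg/end_msg are stashed and replaced with no-ops here so that
+ # nested conf.check() calls stay quiet; COMPOUND_END() restores them and
+ # prints a single summary line. A minimal usage sketch (names hypothetical):
+ #   conf.COMPOUND_START('Checking for foo support')
+ #   ok = conf.CHECK_HEADERS('foo.h') and conf.CHECK_FUNCS('foo_init')
+ #   conf.COMPOUND_END(ok)
+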
conf.end_msg = null_check_message_2 + conf.env.in_compound = 1 + + +@conf +def COMPOUND_END(conf, result): + '''end a compound test''' + conf.env.in_compound -= 1 + if conf.env.in_compound != 0: + return + conf.start_msg = conf.saved_check_message_1 + conf.end_msg = conf.saved_check_message_2 + p = conf.end_msg + if result is True: + p('ok') + elif not result: + p('not found', 'YELLOW') + else: + p(result) + + +@feature('nolink') +def nolink(self): + '''using the nolink type in conf.check() allows us to avoid + the link stage of a test, thus speeding it up for tests + where linking is not needed''' + pass + +
+def CHECK_HEADER(conf, h, add_headers=False, lib=None): + '''check for a header''' + if h in missing_headers and lib is None: + return False + d = h.upper().replace('/', '_') + d = d.replace('.', '_') + d = d.replace('-', '_') + d = 'HAVE_%s' % d + if CONFIG_SET(conf, d): + if add_headers: + if not h in conf.env.hlist: + conf.env.hlist.append(h) + return True + + (ccflags, ldflags, cpppath) = library_flags(conf, lib) + + hdrs = hlist_to_string(conf, headers=h) + if lib is None: + lib = "" + ret = conf.check(fragment='%s\nint main(void) { return 0; }\n' % hdrs, + type='nolink', + execute=0, + cflags=ccflags, + mandatory=False, + includes=cpppath, + uselib=lib.upper(), + msg="Checking for header %s" % h) + if not ret: + missing_headers.add(h) + return False + + conf.DEFINE(d, 1) + if add_headers and not h in conf.env.hlist: + conf.env.hlist.append(h) + return ret + +
+@conf +def CHECK_HEADERS(conf, headers, add_headers=False, together=False, lib=None): + '''check for a list of headers + + when together==True, then the headers accumulate within this test. + This is useful for interdependent headers + ''' + ret = True + if not add_headers and together: + saved_hlist = conf.env.hlist[:] + set_add_headers = True + else: + set_add_headers = add_headers + for hdr in TO_LIST(headers): + if not CHECK_HEADER(conf, hdr, set_add_headers, lib=lib): + ret = False + if not add_headers and together: + conf.env.hlist = saved_hlist + return ret + +
+def header_list(conf, headers=None, lib=None): + '''form a list of headers which exist, as a string''' + hlist=[] + if headers is not None: + for h in TO_LIST(headers): + if CHECK_HEADER(conf, h, add_headers=False, lib=lib): + hlist.append(h) + return hlist_to_string(conf, headers=hlist) + +
+@conf +def CHECK_TYPE(conf, t, alternate=None, headers=None, define=None, lib=None, msg=None, cflags=''): + '''check for a single type''' + if define is None: + define = 'HAVE_' + t.upper().replace(' ', '_') + if msg is None: + msg='Checking for %s' % t + ret = CHECK_CODE(conf, '%s _x' % t, + define, + execute=False, + headers=headers, + local_include=False, + msg=msg, + cflags=cflags, + lib=lib, + link=False) + if not ret and alternate: + conf.DEFINE(t, alternate) + return ret + +
+@conf +def CHECK_TYPES(conf, list, headers=None, define=None, alternate=None, lib=None): + '''check for a list of types''' + ret = True + for t in TO_LIST(list): + if not CHECK_TYPE(conf, t, headers=headers, + define=define, alternate=alternate, lib=lib): + ret = False + return ret + +
+@conf +def CHECK_TYPE_IN(conf, t, headers=None, alternate=None, define=None, cflags=''): + '''check for a single type with a header''' + return CHECK_TYPE(conf, t, headers=headers, alternate=alternate, define=define, cflags=cflags) + +
+@conf +def CHECK_VARIABLE(conf, v, define=None, always=False, + headers=None, msg=None, lib=None, + mandatory=False): + '''check for a variable declaration (or
define)''' + if define is None: + define = 'HAVE_%s' % v.upper() + + if msg is None: + msg="Checking for variable %s" % v + + return CHECK_CODE(conf, + # we need to make sure the compiler doesn't + # optimize it out... + ''' + #ifndef %s + void *_x; _x=(void *)&%s; return (int)_x; + #endif + return 0 + ''' % (v, v), + execute=False, + link=False, + msg=msg, + local_include=False, + lib=lib, + headers=headers, + define=define, + mandatory=mandatory, + always=always) + + +@conf +def CHECK_DECLS(conf, vars, reverse=False, headers=None, lib=None, always=False): + '''check a list of variable declarations, using the HAVE_DECL_xxx form + of define + + When reverse==True then use HAVE_xxx_DECL instead of HAVE_DECL_xxx + ''' + ret = True + for v in TO_LIST(vars): + if not reverse: + define='HAVE_DECL_%s' % v.upper() + else: + define='HAVE_%s_DECL' % v.upper() + if not CHECK_VARIABLE(conf, v, + define=define, + headers=headers, + lib=lib, + msg='Checking for declaration of %s' % v, + always=always): + if not CHECK_CODE(conf, + ''' + return (int)%s; + ''' % (v), + execute=False, + link=False, + msg='Checking for declaration of %s (as enum)' % v, + local_include=False, + headers=headers, + lib=lib, + define=define, + always=always): + ret = False + return ret + + +def CHECK_FUNC(conf, f, link=True, lib=None, headers=None): + '''check for a function''' + define='HAVE_%s' % f.upper() + + ret = False + + in_lib_str = "" + if lib: + in_lib_str = " in %s" % lib + conf.COMPOUND_START('Checking for %s%s' % (f, in_lib_str)) + + if link is None or link: + ret = CHECK_CODE(conf, + # this is based on the autoconf strategy + ''' + #define %s __fake__%s + #ifdef HAVE_LIMITS_H + # include <limits.h> + #else + # include <assert.h> + #endif + #undef %s + #if defined __stub_%s || defined __stub___%s + #error "bad glibc stub" + #endif + extern char %s(); + int main() { return %s(); } + ''' % (f, f, f, f, f, f, f), + execute=False, + link=True, + addmain=False, + add_headers=False, + define=define, + local_include=False, + lib=lib, + headers=headers, + msg='Checking for %s' % f) + + if not ret: + ret = CHECK_CODE(conf, + # it might be a macro + # we need to make sure the compiler doesn't + # optimize it out... 
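+ # (hedged illustration: with f='stat64', addmain=True wraps the fragment
+ # in main() and add_headers=True pulls in the headers detected so far,
+ # so 'void *__x = (void *)stat64; return (int)__x' still compiles when
+ # stat64 is only provided as a macro alias for another symbol)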
+ 'void *__x = (void *)%s; return (int)__x' % f, + execute=False, + link=True, + addmain=True, + add_headers=True, + define=define, + local_include=False, + lib=lib, + headers=headers, + msg='Checking for macro %s' % f) + + if not ret and (link is None or not link): + ret = CHECK_VARIABLE(conf, f, + define=define, + headers=headers, + msg='Checking for declaration of %s' % f) + conf.COMPOUND_END(ret) + return ret + + +@conf +def CHECK_FUNCS(conf, list, link=True, lib=None, headers=None): + '''check for a list of functions''' + ret = True + for f in TO_LIST(list): + if not CHECK_FUNC(conf, f, link=link, lib=lib, headers=headers): + ret = False + return ret + + +@conf +def CHECK_SIZEOF(conf, vars, headers=None, define=None, critical=True): + '''check the size of a type''' + for v in TO_LIST(vars): + v_define = define + ret = False + if v_define is None: + v_define = 'SIZEOF_%s' % v.upper().replace(' ', '_') + for size in list((1, 2, 4, 8, 16, 32, 64)): + if CHECK_CODE(conf, + 'static int test_array[1 - 2 * !(((long int)(sizeof(%s))) <= %d)];' % (v, size), + define=v_define, + quote=False, + headers=headers, + local_include=False, + msg="Checking if size of %s == %d" % (v, size)): + conf.DEFINE(v_define, size) + ret = True + break + if not ret and critical: + Logs.error("Couldn't determine size of '%s'" % v) + sys.exit(1) + return ret + +@conf +def CHECK_SIGN(conf, v, headers=None): + '''check the sign of a type''' + define_name = v.upper().replace(' ', '_') + for op, signed in [('<', 'signed'), + ('>', 'unsigned')]: + if CHECK_CODE(conf, + f'static int test_array[1 - 2 * !((({v})-1) {op} 0)];', + define=f'{define_name}_{signed.upper()}', + quote=False, + headers=headers, + local_include=False, + msg=f"Checking if '{v}' is {signed}"): + return True + + return False + +@conf +def CHECK_VALUEOF(conf, v, headers=None, define=None, lib=None): + '''check the value of a variable/define''' + ret = True + v_define = define + if v_define is None: + v_define = 'VALUEOF_%s' % v.upper().replace(' ', '_') + if CHECK_CODE(conf, + 'printf("%%u", (unsigned)(%s))' % v, + define=v_define, + execute=True, + define_ret=True, + quote=False, + lib=lib, + headers=headers, + local_include=False, + msg="Checking value of %s" % v): + return int(conf.env[v_define]) + + return None + +@conf +def CHECK_CODE(conf, code, define, + always=False, execute=False, addmain=True, + add_headers=True, mandatory=False, + headers=None, msg=None, cflags='', includes='# .', + local_include=True, lib=None, link=True, + define_ret=False, quote=False, + on_target=True, strict=False): + '''check if some code compiles and/or runs''' + + if CONFIG_SET(conf, define): + return True + + if headers is not None: + CHECK_HEADERS(conf, headers=headers, lib=lib) + + if add_headers: + hdrs = header_list(conf, headers=headers, lib=lib) + else: + hdrs = '' + if execute: + execute = 1 + else: + execute = 0 + + if addmain: + fragment='%s\n int main(void) { %s; return 0; }\n' % (hdrs, code) + else: + fragment='%s\n%s\n' % (hdrs, code) + + if msg is None: + msg="Checking for %s" % define + + cflags = TO_LIST(cflags) + + # Be strict when relying on a compiler check + # Some compilers (e.g. 
xlc) ignore non-supported features as warnings + if strict: + if 'WERROR_CFLAGS' in conf.env: + cflags.extend(conf.env['WERROR_CFLAGS']) + + if local_include: + cflags.append('-I%s' % conf.path.abspath()) + + if not link: + type='nolink' + else: + type='cprogram' + + uselib = TO_LIST(lib) + + (ccflags, ldflags, cpppath) = library_flags(conf, uselib) + + includes = TO_LIST(includes) + includes.extend(cpppath) + + uselib = [l.upper() for l in uselib] + + cflags.extend(ccflags) + + if on_target: + test_args = conf.SAMBA_CROSS_ARGS(msg=msg) + else: + test_args = [] + + conf.COMPOUND_START(msg) + + try: + ret = conf.check(fragment=fragment, + execute=execute, + define_name = define, + cflags=cflags, + ldflags=ldflags, + includes=includes, + uselib=uselib, + type=type, + msg=msg, + quote=quote, + test_args=test_args, + define_ret=define_ret) + except Exception: + if always: + conf.DEFINE(define, 0) + else: + conf.undefine(define) + conf.COMPOUND_END(False) + if mandatory: + raise + return False + else: + # Success is indicated by ret but we should unset + # defines set by WAF's c_config.check() because it + # defines it to int(ret) and we want to undefine it + if not ret: + conf.undefine(define) + conf.COMPOUND_END(False) + return False + if not define_ret: + conf.DEFINE(define, 1) + conf.COMPOUND_END(True) + else: + conf.DEFINE(define, ret, quote=quote) + conf.COMPOUND_END(ret) + return True + + +@conf +def CHECK_STRUCTURE_MEMBER(conf, structname, member, + always=False, define=None, headers=None, + lib=None): + '''check for a structure member''' + if define is None: + define = 'HAVE_%s' % member.upper() + return CHECK_CODE(conf, + '%s s; void *_x; _x=(void *)&s.%s' % (structname, member), + define, + execute=False, + link=False, + lib=lib, + always=always, + headers=headers, + local_include=False, + msg="Checking for member %s in %s" % (member, structname)) + + +@conf +def CHECK_CFLAGS(conf, cflags, fragment='int main(void) { return 0; }\n', + mandatory=False): + '''check if the given cflags are accepted by the compiler + ''' + check_cflags = TO_LIST(cflags) + if 'WERROR_CFLAGS' in conf.env: + check_cflags.extend(conf.env['WERROR_CFLAGS']) + return conf.check(fragment=fragment, + execute=0, + mandatory=mandatory, + type='nolink', + cflags=check_cflags, + msg="Checking compiler accepts %s" % cflags) + +@conf +def CHECK_LDFLAGS(conf, ldflags, + mandatory=False): + '''check if the given ldflags are accepted by the linker + ''' + return conf.check(fragment='int main(void) { return 0; }\n', + execute=0, + ldflags=ldflags, + mandatory=mandatory, + msg="Checking linker accepts %s" % ldflags) + + +@conf +def CONFIG_GET(conf, option): + '''return True if a configuration option was found''' + if (option in conf.env): + return conf.env[option] + else: + return None + +@conf +def CONFIG_SET(conf, option): + '''return True if a configuration option was found''' + if option not in conf.env: + return False + v = conf.env[option] + if v is None: + return False + if v == []: + return False + if v == (): + return False + return True + +@conf +def CONFIG_RESET(conf, option): + if option not in conf.env: + return + del conf.env[option] + +Build.BuildContext.CONFIG_RESET = CONFIG_RESET +Build.BuildContext.CONFIG_SET = CONFIG_SET +Build.BuildContext.CONFIG_GET = CONFIG_GET + + +def library_flags(self, libs): + '''work out flags from pkg_config''' + ccflags = [] + ldflags = [] + cpppath = [] + for lib in TO_LIST(libs): + # note that we do not add the -I and -L in here, as that is added by the waf + # core. 
Adding it here would just change the order that it is put on the link line + # which can cause system paths to be added before internal libraries + extra_ccflags = TO_LIST(getattr(self.env, 'CFLAGS_%s' % lib.upper(), [])) + extra_ldflags = TO_LIST(getattr(self.env, 'LDFLAGS_%s' % lib.upper(), [])) + extra_cpppath = TO_LIST(getattr(self.env, 'CPPPATH_%s' % lib.upper(), [])) + ccflags.extend(extra_ccflags) + ldflags.extend(extra_ldflags) + cpppath.extend(extra_cpppath) + + extra_cpppath = TO_LIST(getattr(self.env, 'INCLUDES_%s' % lib.upper(), [])) + cpppath.extend(extra_cpppath) + if 'EXTRA_LDFLAGS' in self.env: + ldflags.extend(self.env['EXTRA_LDFLAGS']) + + ccflags = unique_list(ccflags) + ldflags = unique_list(ldflags) + cpppath = unique_list(cpppath) + return (ccflags, ldflags, cpppath) + + +@conf +def CHECK_LIB(conf, libs, mandatory=False, empty_decl=True, set_target=True, shlib=False): + '''check if a set of libraries exist as system libraries + + returns the sublist of libs that do exist as a syslib or [] + ''' + + fragment= ''' +int foo() +{ + int v = 2; + return v*2; +} +''' + ret = [] + liblist = TO_LIST(libs) + for lib in liblist[:]: + if GET_TARGET_TYPE(conf, lib) == 'SYSLIB': + ret.append(lib) + continue + + (ccflags, ldflags, cpppath) = library_flags(conf, lib) + if shlib: + res = conf.check(features='c cshlib', fragment=fragment, lib=lib, uselib_store=lib, cflags=ccflags, ldflags=ldflags, uselib=lib.upper(), mandatory=False) + else: + res = conf.check(lib=lib, uselib_store=lib, cflags=ccflags, ldflags=ldflags, uselib=lib.upper(), mandatory=False) + + if not res: + if mandatory: + Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, libs)) + sys.exit(1) + if empty_decl: + # if it isn't a mandatory library, then remove it from dependency lists + if set_target: + SET_TARGET_TYPE(conf, lib, 'EMPTY') + else: + conf.define('HAVE_LIB%s' % lib.upper().replace('-','_').replace('.','_'), 1) + conf.env['LIB_' + lib.upper()] = lib + if set_target: + conf.SET_TARGET_TYPE(lib, 'SYSLIB') + ret.append(lib) + + return ret + + + +@conf +def CHECK_FUNCS_IN(conf, list, library, mandatory=False, checklibc=False, + headers=None, link=True, empty_decl=True, set_target=True): + """ + check that the functions in 'list' are available in 'library' + if they are, then make that library available as a dependency + + if the library is not available and mandatory==True, then + raise an error. 
+ + If the library is not available and mandatory==False, then + add the library to the list of dependencies to remove from + build rules + + optionally check for the functions first in libc + """ + remaining = TO_LIST(list) + liblist = TO_LIST(library) + + # check if some already found + for f in remaining[:]: + if CONFIG_SET(conf, 'HAVE_%s' % f.upper()): + remaining.remove(f) + + # see if the functions are in libc + if checklibc: + for f in remaining[:]: + if CHECK_FUNC(conf, f, link=True, headers=headers): + remaining.remove(f) + + if remaining == []: + for lib in liblist: + if GET_TARGET_TYPE(conf, lib) != 'SYSLIB' and empty_decl: + SET_TARGET_TYPE(conf, lib, 'EMPTY') + return True + + checklist = conf.CHECK_LIB(liblist, empty_decl=empty_decl, set_target=set_target) + for lib in liblist[:]: + if not lib in checklist and mandatory: + Logs.error("Mandatory library '%s' not found for functions '%s'" % (lib, list)) + sys.exit(1) + + ret = True + for f in remaining: + if not CHECK_FUNC(conf, f, lib=' '.join(checklist), headers=headers, link=link): + ret = False + + return ret + + +@conf +def IN_LAUNCH_DIR(conf): + '''return True if this rule is being run from the launch directory''' + return os.path.realpath(conf.path.abspath()) == os.path.realpath(Context.launch_dir) +Options.OptionsContext.IN_LAUNCH_DIR = IN_LAUNCH_DIR + + +@conf +def SAMBA_CONFIG_H(conf, path=None): + '''write out config.h in the right directory''' + # we don't want to produce a config.h in places like lib/replace + # when we are building projects that depend on lib/replace + if not IN_LAUNCH_DIR(conf): + return + + # we need to build real code that can't be optimized away to test + stack_protect_list = ['-fstack-protector-strong', '-fstack-protector'] + for stack_protect_flag in stack_protect_list: + flag_supported = conf.check(fragment=''' + #include <stdio.h> + + int main(void) + { + char t[100000]; + while (fgets(t, sizeof(t), stdin)); + return 0; + } + ''', + execute=0, + cflags=[ '-Werror', '-Wp,-D_FORTIFY_SOURCE=2', stack_protect_flag], + mandatory=False, + msg='Checking if compiler accepts %s' % (stack_protect_flag)) + if flag_supported: + conf.ADD_CFLAGS('%s' % (stack_protect_flag)) + break + + flag_supported = conf.check(fragment=''' + #include <stdio.h> + + int main(void) + { + char t[100000]; + while (fgets(t, sizeof(t), stdin)); + return 0; + } + ''', + execute=0, + cflags=[ '-Werror', '-fstack-clash-protection'], + mandatory=False, + msg='Checking if compiler accepts -fstack-clash-protection') + if flag_supported: + conf.ADD_CFLAGS('-fstack-clash-protection') + + if Options.options.debug: + conf.ADD_CFLAGS('-g', testflags=True) + + if Options.options.pidl_developer: + conf.env.PIDL_DEVELOPER_MODE = True + + if Options.options.developer: + conf.env.DEVELOPER_MODE = True + + conf.ADD_CFLAGS('-g', testflags=True) + conf.ADD_CFLAGS('-Wall', testflags=True) + conf.ADD_CFLAGS('-Wshadow', testflags=True) + conf.ADD_CFLAGS('-Wmissing-prototypes', testflags=True) + if CHECK_CODE(conf, + 'struct a { int b; }; struct c { struct a d; } e = { };', + 'CHECK_C99_INIT', + link=False, + cflags='-Wmissing-field-initializers -Werror=missing-field-initializers', + msg="Checking C99 init of nested structs."): + conf.ADD_CFLAGS('-Wmissing-field-initializers', testflags=True) + conf.ADD_CFLAGS('-Wformat-overflow=2', testflags=True) + conf.ADD_CFLAGS('-Wformat-zero-length', testflags=True) + conf.ADD_CFLAGS('-Wcast-align -Wcast-qual', testflags=True) + conf.ADD_CFLAGS('-fno-common', testflags=True) + + 
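# testflags=True routes every flag below through CHECK_CFLAGS() first,
+ # so an option this particular compiler rejects is silently dropped
+ # rather than failing the build; e.g. a compiler without -Wcast-align
+ # support (hypothetical case) simply builds without that warning.
+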
conf.ADD_CFLAGS('-Werror=address', testflags=True) + # we add these here to ensure that -Wstrict-prototypes is not set during configure + conf.ADD_CFLAGS('-Werror=strict-prototypes -Wstrict-prototypes', + testflags=True) + conf.ADD_CFLAGS('-Werror=write-strings -Wwrite-strings', + testflags=True) + conf.ADD_CFLAGS('-Werror-implicit-function-declaration', + testflags=True) + conf.ADD_CFLAGS('-Werror=implicit-int', + testflags=True) + conf.ADD_CFLAGS('-Werror=pointer-arith -Wpointer-arith', + testflags=True) + conf.ADD_CFLAGS('-Werror=declaration-after-statement -Wdeclaration-after-statement', + testflags=True) + conf.ADD_CFLAGS('-Werror=return-type -Wreturn-type', + testflags=True) + conf.ADD_CFLAGS('-Werror=uninitialized -Wuninitialized', + testflags=True) + conf.ADD_CFLAGS('-Wimplicit-fallthrough', + testflags=True) + conf.ADD_CFLAGS('-Werror=strict-overflow -Wstrict-overflow=2', + testflags=True) + conf.ADD_CFLAGS('-Werror=old-style-definition -Wold-style-definition', + testflags=True) + + conf.ADD_CFLAGS('-Wformat=2 -Wno-format-y2k', testflags=True) + conf.ADD_CFLAGS('-Wno-format-zero-length', testflags=True) + conf.ADD_CFLAGS('-Werror=format-security -Wformat-security', + testflags=True, prereq_flags='-Wformat') + # This check is because for ldb_search(), a NULL format string + # is not an error, but some compilers complain about that. + if CHECK_CFLAGS(conf, ["-Werror=format", "-Wformat=2"], ''' +int testformat(char *format, ...) __attribute__ ((format (__printf__, 1, 2))); + +int main(void) { + testformat(0); + return 0; +} + +'''): + if not 'EXTRA_CFLAGS' in conf.env: + conf.env['EXTRA_CFLAGS'] = [] + conf.env['EXTRA_CFLAGS'].extend(TO_LIST("-Werror=format")) + + if CHECK_CFLAGS(conf, ["-Wno-error=array-bounds"]): + conf.define('HAVE_WNO_ERROR_ARRAY_BOUNDS', 1) + + if CHECK_CFLAGS(conf, ["-Wno-error=stringop-overflow"]): + conf.define('HAVE_WNO_ERROR_STRINGOP_OVERFLOW', 1) + + if CHECK_CFLAGS(conf, ["-Wno-error=declaration-after-statement"]): + conf.define('HAVE_WNO_ERROR_DECLARATION_AFTER_STATEMENT', 1) + + if not Options.options.disable_warnings_as_errors: + conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Werror -Wno-error=deprecated-declarations', testflags=True) + conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Wno-error=tautological-compare', testflags=True) + conf.ADD_NAMED_CFLAGS('PICKY_CFLAGS', '-Wno-error=cast-align', testflags=True) + + if Options.options.fatal_errors: + conf.ADD_CFLAGS('-Wfatal-errors', testflags=True) + + if Options.options.pedantic: + conf.ADD_CFLAGS('-W', testflags=True) + + if (Options.options.address_sanitizer or + Options.options.undefined_sanitizer): + conf.ADD_CFLAGS('-g -O1', testflags=True) + if (Options.options.address_sanitizer + or Options.options.memory_sanitizer): + conf.ADD_CFLAGS('-fno-omit-frame-pointer', testflags=True) + if Options.options.address_sanitizer: + conf.ADD_CFLAGS('-fsanitize=address', testflags=True) + conf.ADD_LDFLAGS('-fsanitize=address', testflags=True) + conf.env['ADDRESS_SANITIZER'] = True + if Options.options.undefined_sanitizer: + conf.ADD_CFLAGS('-fsanitize=undefined', testflags=True) + conf.ADD_CFLAGS('-fsanitize=null', testflags=True) + conf.ADD_CFLAGS('-fsanitize=alignment', testflags=True) + conf.ADD_LDFLAGS('-fsanitize=undefined', testflags=True) + conf.env['UNDEFINED_SANITIZER'] = True + + # MemorySanitizer is only available if you build with clang + if Options.options.memory_sanitizer: + conf.ADD_CFLAGS('-g -O2', testflags=True) + conf.ADD_CFLAGS('-fsanitize=memory', testflags=True) + 
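# the track-origins flag below makes MSan report where an uninitialized
+ # value was originally allocated, at extra runtime cost; like
+ # -fsanitize=memory itself it is clang-only, hence the testflags guard.
+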
conf.ADD_CFLAGS('-fsanitize-memory-track-origins=2', testflags=True) + conf.ADD_LDFLAGS('-fsanitize=memory') + conf.env['MEMORY_SANITIZER'] = True + + # Let people pass additional ADDITIONAL_{CFLAGS,LDFLAGS} + # environment variables which are only used for the final build. + # + # The CFLAGS and LDFLAGS environment variables are also + # used for the configure checks which might impact their results. + # + # If these variables don't pass a smoke test, fail the configure + + conf.add_os_flags('ADDITIONAL_CFLAGS') + if conf.env.ADDITIONAL_CFLAGS: + conf.CHECK_CFLAGS(conf.env['ADDITIONAL_CFLAGS'], + mandatory=True) + conf.env['EXTRA_CFLAGS'].extend(conf.env['ADDITIONAL_CFLAGS']) + + conf.add_os_flags('ADDITIONAL_LDFLAGS') + if conf.env.ADDITIONAL_LDFLAGS: + conf.CHECK_LDFLAGS(conf.env['ADDITIONAL_LDFLAGS'], + mandatory=True) + conf.env['EXTRA_LDFLAGS'].extend(conf.env['ADDITIONAL_LDFLAGS']) + + if path is None: + conf.write_config_header('default/config.h', top=True, remove=False) + else: + conf.write_config_header(os.path.join(conf.variant, path), remove=False) + for key in conf.env.define_key: + conf.undefine(key, from_env=False) + conf.env.define_key = [] + conf.SAMBA_CROSS_CHECK_COMPLETE() + +
+@conf +def CONFIG_PATH(conf, name, default): + '''setup a configurable path''' + if not name in conf.env: + if default[0] == '/': + conf.env[name] = default + else: + conf.env[name] = conf.env['PREFIX'] + default +
+@conf +def ADD_NAMED_CFLAGS(conf, name, flags, testflags=False, prereq_flags=None): + '''add some CFLAGS to the command line + optionally set testflags to ensure all the flags work + ''' + if prereq_flags is None: + prereq_flags = [] + prereq_flags = TO_LIST(prereq_flags) + if testflags: + ok_flags=[] + for f in flags.split(): + if CHECK_CFLAGS(conf, [f] + prereq_flags): + ok_flags.append(f) + flags = ok_flags + if not name in conf.env: + conf.env[name] = [] + conf.env[name].extend(TO_LIST(flags)) +
+@conf +def ADD_CFLAGS(conf, flags, testflags=False, prereq_flags=None): + '''add some CFLAGS to the command line + optionally set testflags to ensure all the flags work + ''' + if prereq_flags is None: + prereq_flags = [] + ADD_NAMED_CFLAGS(conf, 'EXTRA_CFLAGS', flags, testflags=testflags, + prereq_flags=prereq_flags) +
+@conf +def ADD_LDFLAGS(conf, flags, testflags=False): + '''add some LDFLAGS to the command line + optionally set testflags to ensure all the flags work + + this will return the flags that are added, if any + ''' + if testflags: + ok_flags=[] + for f in flags.split(): + if CHECK_LDFLAGS(conf, f): + ok_flags.append(f) + flags = ok_flags + if not 'EXTRA_LDFLAGS' in conf.env: + conf.env['EXTRA_LDFLAGS'] = [] + conf.env['EXTRA_LDFLAGS'].extend(TO_LIST(flags)) + return flags + +
+@conf +def ADD_EXTRA_INCLUDES(conf, includes): + '''add some extra include directories to all builds''' + if not 'EXTRA_INCLUDES' in conf.env: + conf.env['EXTRA_INCLUDES'] = [] + conf.env['EXTRA_INCLUDES'].extend(TO_LIST(includes)) + + +
+def CURRENT_CFLAGS(bld, target, cflags, + allow_warnings=False, + use_hostcc=False, + hide_symbols=False): + '''work out the current flags.
local flags are added first''' + ret = [] + if use_hostcc: + ret += ['-D_SAMBA_HOSTCC_'] + ret += TO_LIST(cflags) + if not 'EXTRA_CFLAGS' in bld.env: + list = [] + else: + list = bld.env['EXTRA_CFLAGS'] + ret.extend(list) + if not allow_warnings and 'PICKY_CFLAGS' in bld.env: + list = bld.env['PICKY_CFLAGS'] + ret.extend(list) + if hide_symbols and bld.env.HAVE_VISIBILITY_ATTR: + ret.append(bld.env.VISIBILITY_CFLAGS) + return ret + +
+@conf +def CHECK_CC_ENV(conf): + """trim whitespace from 'CC'. + The build farm sometimes puts a space at the start""" + if os.environ.get('CC'): + conf.env.CC = TO_LIST(os.environ.get('CC')) + +
+@conf +def SETUP_CONFIGURE_CACHE(conf, enable): + '''enable/disable cache of configure results''' + if enable: + # when -C is chosen, we will use a private cache and will + # not look into system includes. This roughly matches what + # autoconf does with -C + cache_path = os.path.join(conf.bldnode.abspath(), '.confcache') + mkdir_p(cache_path) + Options.cache_global = os.environ['WAFCACHE'] = cache_path + else: + # when -C is not chosen we will not cache configure checks + # We set the recursion limit low to prevent waf from spending + # a lot of time on the signatures of the files. + Options.cache_global = os.environ['WAFCACHE'] = '' + preproc.recursion_limit = 1 + # in either case we don't need to scan system includes + preproc.go_absolute = False + +
+@conf +def SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS(conf): + if (Options.options.address_sanitizer + or Options.options.memory_sanitizer + or Options.options.enable_libfuzzer): + # Sanitizers can rely on symbols undefined at library link time and the + # symbols used for fuzzers are only defined by compiler wrappers. + return + + if not sys.platform.startswith("openbsd"): + # we don't want any libraries or modules to rely on runtime + # resolution of symbols + conf.env.undefined_ldflags = conf.ADD_LDFLAGS('-Wl,-no-undefined', testflags=True) + + if (conf.env.undefined_ignore_ldflags == [] and + conf.CHECK_LDFLAGS(['-undefined', 'dynamic_lookup'])): + conf.env.undefined_ignore_ldflags = ['-undefined', 'dynamic_lookup']
diff --git a/buildtools/wafsamba/samba_autoproto.py b/buildtools/wafsamba/samba_autoproto.py new file mode 100644 index 0000000..87fb8ef --- /dev/null +++ b/buildtools/wafsamba/samba_autoproto.py @@ -0,0 +1,24 @@ +# waf build tool for building automatic prototypes from C source + +import os +from waflib import Build +from samba_utils import SET_TARGET_TYPE + +def SAMBA_AUTOPROTO(bld, header, source): + '''rule for samba prototype generation''' + bld.SET_BUILD_GROUP('prototypes') + relpath = os.path.relpath(bld.path.abspath(), bld.srcnode.abspath()) + name = os.path.join(relpath, header) + SET_TARGET_TYPE(bld, name, 'PROTOTYPE') + t = bld( + name = name, + source = source, + target = header, + update_outputs=True, + ext_out='.c', + before ='c', + rule = '${PERL} "${SCRIPT}/mkproto.pl" --srcdir=.. --builddir=.
--public=/dev/null --private="${TGT}" ${SRC}' + ) + t.env.SCRIPT = os.path.join(bld.srcnode.abspath(), 'source4/script') +Build.BuildContext.SAMBA_AUTOPROTO = SAMBA_AUTOPROTO + diff --git a/buildtools/wafsamba/samba_bundled.py b/buildtools/wafsamba/samba_bundled.py new file mode 100644 index 0000000..029be15 --- /dev/null +++ b/buildtools/wafsamba/samba_bundled.py @@ -0,0 +1,273 @@ +# functions to support bundled libraries + +import sys +from waflib import Build, Options, Logs +from waflib.Configure import conf +from wafsamba import samba_utils + +def PRIVATE_NAME(bld, name): + '''possibly rename a library to include a bundled extension''' + + extension = bld.env.PRIVATE_EXTENSION + + if name in bld.env.PRIVATE_EXTENSION_EXCEPTION: + return name + + if extension and name.startswith('%s' % extension): + return name + + if extension and name.endswith('%s' % extension): + return name + + return "%s-%s" % (name, extension) + + +def target_in_list(target, lst, default): + for l in lst: + if target == l: + return True + if '!' + target == l: + return False + if l == 'ALL': + return True + if l == 'NONE': + return False + return default + + +def BUILTIN_LIBRARY(bld, name): + '''return True if a library should be builtin + instead of being built as a shared lib''' + return target_in_list(name, bld.env.BUILTIN_LIBRARIES, False) +Build.BuildContext.BUILTIN_LIBRARY = BUILTIN_LIBRARY + + +def BUILTIN_DEFAULT(opt, builtins): + '''set a comma separated default list of builtin libraries for this package''' + if 'BUILTIN_LIBRARIES_DEFAULT' in Options.options.__dict__: + return + Options.options.__dict__['BUILTIN_LIBRARIES_DEFAULT'] = builtins +Options.OptionsContext.BUILTIN_DEFAULT = BUILTIN_DEFAULT + + +def PRIVATE_EXTENSION_DEFAULT(opt, extension, noextension=''): + '''set a default private library extension''' + if 'PRIVATE_EXTENSION_DEFAULT' in Options.options.__dict__: + return + Options.options.__dict__['PRIVATE_EXTENSION_DEFAULT'] = extension + Options.options.__dict__['PRIVATE_EXTENSION_EXCEPTION'] = noextension +Options.OptionsContext.PRIVATE_EXTENSION_DEFAULT = PRIVATE_EXTENSION_DEFAULT + + +def minimum_library_version(conf, libname, default): + '''allow override of minimum system library version''' + + minlist = Options.options.MINIMUM_LIBRARY_VERSION + if not minlist: + return default + + for m in minlist.split(','): + a = m.split(':') + if len(a) != 2: + Logs.error("Bad syntax for --minimum-library-version of %s" % m) + sys.exit(1) + if a[0] == libname: + return a[1] + return default + + +@conf +def LIB_MAY_BE_BUNDLED(conf, libname): + if libname in conf.env.SYSTEM_LIBS: + return False + if libname in conf.env.BUNDLED_LIBS: + return True + if '!%s' % libname in conf.env.BUNDLED_LIBS: + return False + if 'NONE' in conf.env.BUNDLED_LIBS: + return False + return True + +def __LIB_MUST_BE(liblist, libname): + if libname in liblist: + return True + if '!%s' % libname in liblist: + return False + if 'ALL' in liblist: + return True + return False + +@conf +def LIB_MUST_BE_BUNDLED(conf, libname): + return __LIB_MUST_BE(conf.env.BUNDLED_LIBS, libname) + +@conf +def LIB_MUST_BE_PRIVATE(conf, libname): + return __LIB_MUST_BE(conf.env.PRIVATE_LIBS, libname) + +@conf +def CHECK_BUNDLED_SYSTEM_PKG(conf, libname, minversion='0.0.0', + maxversion=None, version_blacklist=None, + onlyif=None, implied_deps=None, pkg=None): + '''check if a library is available as a system library. 
+ + This only tries using pkg-config + ''' + if version_blacklist is None: + version_blacklist = [] + return conf.CHECK_BUNDLED_SYSTEM(libname, + minversion=minversion, + maxversion=maxversion, + version_blacklist=version_blacklist, + onlyif=onlyif, + implied_deps=implied_deps, + pkg=pkg) + +@conf +def CHECK_BUNDLED_SYSTEM(conf, libname, minversion='0.0.0', + maxversion=None, version_blacklist=None, + checkfunctions=None, headers=None, checkcode=None, + onlyif=None, implied_deps=None, + require_headers=True, pkg=None, set_target=True): + '''check if a library is available as a system library. + this first tries via pkg-config, then if that fails + tries by testing for a specified function in the specified lib + ''' + # We always do a logic validation of 'onlyif' first + if version_blacklist is None: + version_blacklist = [] + missing = [] + if onlyif: + for l in samba_utils.TO_LIST(onlyif): + f = 'FOUND_SYSTEMLIB_%s' % l + if not f in conf.env: + Logs.error('ERROR: CHECK_BUNDLED_SYSTEM(%s) - ' % (libname) + + 'missing prerequisite check for ' + + 'system library %s, onlyif=%r' % (l, onlyif)) + sys.exit(1) + if not conf.env[f]: + missing.append(l) + found = 'FOUND_SYSTEMLIB_%s' % libname + if found in conf.env: + return conf.env[found] + if conf.LIB_MUST_BE_BUNDLED(libname): + conf.env[found] = False + return False + + # see if the library should only use a system version if another dependent + # system version is found. That prevents possible use of mixed library + # versions + if missing: + if not conf.LIB_MAY_BE_BUNDLED(libname): + Logs.error('ERROR: Use of system library %s depends on missing system library/libraries %r' % (libname, missing)) + sys.exit(1) + conf.env[found] = False + return False + + def check_functions_headers_code(): + '''helper function for CHECK_BUNDLED_SYSTEM''' + if require_headers and headers and not conf.CHECK_HEADERS(headers, lib=libname): + return False + if checkfunctions is not None: + ok = conf.CHECK_FUNCS_IN(checkfunctions, libname, headers=headers, + empty_decl=False, set_target=False) + if not ok: + return False + if checkcode is not None: + define='CHECK_BUNDLED_SYSTEM_%s' % libname.upper() + ok = conf.CHECK_CODE(checkcode, lib=libname, + headers=headers, local_include=False, + msg=msg, define=define) + conf.CONFIG_RESET(define) + if not ok: + return False + return True + + minversion = minimum_library_version(conf, libname, minversion) + + msg = 'Checking for system %s' % libname + msg_ver = [] + if minversion != '0.0.0': + msg_ver.append('>=%s' % minversion) + if maxversion is not None: + msg_ver.append('<=%s' % maxversion) + for v in version_blacklist: + msg_ver.append('!=%s' % v) + if msg_ver != []: + msg += " (%s)" % (" ".join(msg_ver)) + + uselib_store=libname.upper() + if pkg is None: + pkg = libname + + version_checks = '%s >= %s' % (pkg, minversion) + if maxversion is not None: + version_checks += ' %s <= %s' % (pkg, maxversion) + + version_checks += "".join(' %s != %s' % (pkg, v) for v in version_blacklist) + + # try pkgconfig first + if (conf.CHECK_CFG(package=pkg, + args='"%s" --cflags --libs' % (version_checks), + msg=msg, uselib_store=uselib_store) and + check_functions_headers_code()): + if set_target: + conf.SET_TARGET_TYPE(libname, 'SYSLIB') + conf.env[found] = True + if implied_deps: + conf.SET_SYSLIB_DEPS(libname, implied_deps) + return True + if checkfunctions is not None: + if check_functions_headers_code(): + conf.env[found] = True + if implied_deps: + conf.SET_SYSLIB_DEPS(libname, implied_deps) + if set_target: + 
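# marking the target SYSLIB records that this dependency is satisfied
+ # by an installed system library, so later build rules that list it
+ # link against -l<libname> instead of expecting a bundled copy.
+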
conf.SET_TARGET_TYPE(libname, 'SYSLIB') + return True + conf.env[found] = False + if not conf.LIB_MAY_BE_BUNDLED(libname): + Logs.error('ERROR: System library %s of version %s not found, and bundling disabled' % (libname, minversion)) + sys.exit(1) + return False + + +def tuplize_version(version): + return tuple([int(x) for x in version.split(".")]) + +@conf +def CHECK_BUNDLED_SYSTEM_PYTHON(conf, libname, modulename, minversion='0.0.0'): + '''check if a python module is available on the system and + has the specified minimum version. + ''' + if conf.LIB_MUST_BE_BUNDLED(libname): + return False + + # see if the library should only use a system version if another dependent + # system version is found. That prevents possible use of mixed library + # versions + minversion = minimum_library_version(conf, libname, minversion) + + try: + m = __import__(modulename) + except ImportError: + found = False + else: + try: + version = m.__version__ + except AttributeError: + found = False + else: + found = tuplize_version(version) >= tuplize_version(minversion) + if not found and not conf.LIB_MAY_BE_BUNDLED(libname): + Logs.error('ERROR: Python module %s of version %s not found, and bundling disabled' % (libname, minversion)) + sys.exit(1) + return found + + +def NONSHARED_BINARY(bld, name): + '''return True if a binary should be built without non-system shared libs''' + return target_in_list(name, bld.env.NONSHARED_BINARIES, False) +Build.BuildContext.NONSHARED_BINARY = NONSHARED_BINARY + + diff --git a/buildtools/wafsamba/samba_conftests.py b/buildtools/wafsamba/samba_conftests.py new file mode 100644 index 0000000..38ce20d --- /dev/null +++ b/buildtools/wafsamba/samba_conftests.py @@ -0,0 +1,529 @@ +# a set of config tests that use the samba_autoconf functions +# to test for commonly needed configuration options + +import os, shutil, re +from waflib import Build, Configure, Utils, Options, Logs, Errors +from waflib.Configure import conf +from samba_utils import TO_LIST, ADD_LD_LIBRARY_PATH, get_string + + +def add_option(self, *k, **kw): + '''syntax help: provide the "match" attribute to opt.add_option() so that folders can be added to specific config tests''' + Options.OptionsContext.parser = self + match = kw.get('match', []) + if match: + del kw['match'] + opt = self.parser.add_option(*k, **kw) + opt.match = match + return opt +Options.OptionsContext.add_option = add_option + +@conf +def check(self, *k, **kw): + '''Override the waf defaults to inject --with-directory options''' + + if not 'env' in kw: + kw['env'] = self.env.derive() + + # match the configuration test with specific options, for example: + # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv" + additional_dirs = [] + if 'msg' in kw: + msg = kw['msg'] + for x in Options.OptionsContext.parser.parser.option_list: + if getattr(x, 'match', None) and msg in x.match: + d = getattr(Options.options, x.dest, '') + if d: + additional_dirs.append(d) + + # we add the additional dirs twice: once for the test data, and again if the compilation test succeeds below + def add_options_dir(dirs, env): + for x in dirs: + if not x in env.CPPPATH: + env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH + if not x in env.LIBPATH: + env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH + + add_options_dir(additional_dirs, kw['env']) + + self.validate_c(kw) + self.start_msg(kw['msg']) + ret = None + try: + ret = self.run_c_code(*k, **kw) + except Configure.ConfigurationError as e: + self.end_msg(kw['errmsg'], 'YELLOW') + if 
'mandatory' in kw and kw['mandatory']: + if Logs.verbose > 1: + raise + else: + self.fatal('the configuration failed (see %r)' % self.log.name) + else: + kw['success'] = ret + self.end_msg(self.ret_msg(kw['okmsg'], kw)) + + # success! keep the CPPPATH/LIBPATH + add_options_dir(additional_dirs, self.env) + + self.post_check(*k, **kw) + if not kw.get('execute', False): + return ret == 0 + return ret + +
+@conf +def CHECK_ICONV(conf, define='HAVE_NATIVE_ICONV'): + '''check if the iconv library is installed + optionally pass a define''' + if conf.CHECK_FUNCS_IN('iconv_open', 'iconv', checklibc=True, headers='iconv.h'): + conf.DEFINE(define, 1) + return True + return False + +
+@conf +def CHECK_LARGEFILE(conf, define='HAVE_LARGEFILE'): + '''see what we need for largefile support''' + getconf_cflags = conf.CHECK_COMMAND(['getconf', 'LFS_CFLAGS']) + if getconf_cflags is not False: + if (conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', + define='WORKING_GETCONF_LFS_CFLAGS', + execute=True, + cflags=getconf_cflags, + msg='Checking getconf large file support flags work')): + conf.ADD_CFLAGS(getconf_cflags) + getconf_cflags_list=TO_LIST(getconf_cflags) + for flag in getconf_cflags_list: + if flag[:2] == "-D": + flag_split = flag[2:].split('=') + if len(flag_split) == 1: + conf.DEFINE(flag_split[0], '1') + else: + conf.DEFINE(flag_split[0], flag_split[1]) + + if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', + define, + execute=True, + msg='Checking for large file support without additional flags'): + return True + + if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', + define, + execute=True, + cflags='-D_FILE_OFFSET_BITS=64', + msg='Checking for -D_FILE_OFFSET_BITS=64'): + conf.DEFINE('_FILE_OFFSET_BITS', 64) + return True + + if conf.CHECK_CODE('if (sizeof(off_t) < 8) return 1', + define, + execute=True, + cflags='-D_LARGE_FILES', + msg='Checking for -D_LARGE_FILES'): + conf.DEFINE('_LARGE_FILES', 1) + return True + return False + +
+@conf +def CHECK_C_PROTOTYPE(conf, function, prototype, define, headers=None, msg=None, lib=None): + '''verify that a C prototype matches the one on the current system''' + if not conf.CHECK_DECLS(function, headers=headers): + return False + if not msg: + msg = 'Checking C prototype for %s' % function + return conf.CHECK_CODE('%s; void *_x = (void *)%s' % (prototype, function), + define=define, + local_include=False, + headers=headers, + link=False, + execute=False, + msg=msg, + lib=lib) + +
+@conf +def CHECK_CHARSET_EXISTS(conf, charset, outcharset='UCS-2LE', headers=None, define=None): + '''check that a named charset can be used with iconv_open() for conversion + to a target charset + ''' + msg = 'Checking if we can convert from %s to %s' % (charset, outcharset) + if define is None: + define = 'HAVE_CHARSET_%s' % charset.upper().replace('-','_') + return conf.CHECK_CODE(''' + iconv_t cd = iconv_open("%s", "%s"); + if (cd == 0 || cd == (iconv_t)-1) return -1; + ''' % (charset, outcharset), + define=define, + execute=True, + msg=msg, + lib='iconv', + headers=headers) +
+def find_config_dir(conf): + '''find a directory to run tests in''' + k = 0 + while k < 10000: + dir = os.path.join(conf.bldnode.abspath(), '.conf_check_%d' % k) + try: + shutil.rmtree(dir) + except OSError: + pass + try: + os.stat(dir) + except: + break + k += 1 + + try: + os.makedirs(dir) + except: + conf.fatal('cannot create a configuration test folder %r' % dir) + + try: + os.stat(dir) + except: + conf.fatal('cannot use the configuration test folder %r' % dir) + return dir +
+@conf
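+# (note: vnum="1" in the check below switches on waf's versioned
+# shared-library naming, i.e. soname-style link flags such as
+# -Wl,-soname,libfoo.so.1 on ELF toolchains; the exact flag is
+# toolchain-dependent, so this fails where the default flags cannot
+# set a library name)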
+def CHECK_SHLIB_INTRASINC_NAME_FLAGS(conf, msg): + ''' + check if the waf default flags for setting the name of lib + are ok + ''' + + snip = ''' +int foo(int v) { + return v * 2; +} +''' + return conf.check(features='c cshlib',vnum="1",fragment=snip,msg=msg, mandatory=False) + +@conf +def CHECK_NEED_LC(conf, msg): + '''check if we need -lc''' + + dir = find_config_dir(conf) + + env = conf.env + + bdir = os.path.join(dir, 'testbuild2') + if not os.path.exists(bdir): + os.makedirs(bdir) + + + subdir = os.path.join(dir, "liblctest") + + os.makedirs(subdir) + + Utils.writef(os.path.join(subdir, 'liblc1.c'), '#include <stdio.h>\nint lib_func(void) { FILE *f = fopen("foo", "r");}\n') + + bld = Build.BuildContext() + bld.log = conf.log + bld.all_envs.update(conf.all_envs) + bld.all_envs['default'] = env + bld.lst_variants = bld.all_envs.keys() + bld.load_dirs(dir, bdir) + + bld.rescan(bld.srcnode) + + bld(features='c cshlib', + source='liblctest/liblc1.c', + ldflags=conf.env['EXTRA_LDFLAGS'], + target='liblc', + name='liblc') + + try: + bld.compile() + conf.check_message(msg, '', True) + return True + except: + conf.check_message(msg, '', False) + return False + + +@conf +def CHECK_SHLIB_W_PYTHON(conf, msg): + '''check if we need -undefined dynamic_lookup''' + + dir = find_config_dir(conf) + snip = ''' +#include <Python.h> +#include <crt_externs.h> +#define environ (*_NSGetEnviron()) + +static PyObject *ldb_module = NULL; +int foo(int v) { + extern char **environ; + environ[0] = 1; + ldb_module = PyImport_ImportModule("ldb"); + return v * 2; +} +''' + return conf.check(features='c cshlib',uselib='PYEMBED',fragment=snip,msg=msg, mandatory=False) + +# this one is quite complex, and should probably be broken up +# into several parts. I'd quite like to create a set of CHECK_COMPOUND() +# functions that make writing complex compound tests like this much easier +@conf +def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None): + '''see if the platform supports building libraries''' + + if msg is None: + if rpath: + msg = "rpath library support" + else: + msg = "building library support" + + dir = find_config_dir(conf) + + bdir = os.path.join(dir, 'testbuild') + if not os.path.exists(bdir): + os.makedirs(bdir) + + env = conf.env + + subdir = os.path.join(dir, "libdir") + + os.makedirs(subdir) + + Utils.writef(os.path.join(subdir, 'lib1.c'), 'int lib_func(void) { return 42; }\n') + Utils.writef(os.path.join(dir, 'main.c'), + 'int lib_func(void);\n' + 'int main(void) {return !(lib_func() == 42);}\n') + + bld = Build.BuildContext() + bld.log = conf.log + bld.all_envs.update(conf.all_envs) + bld.all_envs['default'] = env + bld.lst_variants = bld.all_envs.keys() + bld.load_dirs(dir, bdir) + + bld.rescan(bld.srcnode) + + ldflags = [] + if version_script: + ldflags.append("-Wl,--version-script=%s/vscript" % bld.path.abspath()) + Utils.writef(os.path.join(dir,'vscript'), 'TEST_1.0A2 { global: *; };\n') + + bld(features='c cshlib', + source='libdir/lib1.c', + target='libdir/lib1', + ldflags=ldflags, + name='lib1') + + o = bld(features='c cprogram', + source='main.c', + target='prog1', + uselib_local='lib1') + + if rpath: + o.rpath=os.path.join(bdir, 'default/libdir') + + # compile the program + try: + bld.compile() + except: + conf.check_message(msg, '', False) + return False + + # path for execution + lastprog = o.link_task.outputs[0].abspath(env) + + if not rpath: + if 'LD_LIBRARY_PATH' in os.environ: + old_ld_library_path = os.environ['LD_LIBRARY_PATH'] + else: + old_ld_library_path = 
None + ADD_LD_LIBRARY_PATH(os.path.join(bdir, 'default/libdir')) + + # we need to run the program, try to get its result + args = conf.SAMBA_CROSS_ARGS(msg=msg) + proc = Utils.subprocess.Popen([lastprog] + args, + stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE) + (out, err) = proc.communicate() + w = conf.log.write + w(str(out)) + w('\n') + w(str(err)) + w('\nreturncode %r\n' % proc.returncode) + ret = (proc.returncode == 0) + + if not rpath: + os.environ['LD_LIBRARY_PATH'] = old_ld_library_path or '' + + conf.check_message(msg, '', ret) + return ret + + + +@conf +def CHECK_PERL_MANPAGE(conf, msg=None, section=None): + '''work out what extension perl uses for manpages''' + + if msg is None: + if section: + msg = "perl man%s extension" % section + else: + msg = "perl manpage generation" + + conf.start_msg(msg) + + dir = find_config_dir(conf) + + bdir = os.path.join(dir, 'testbuild') + if not os.path.exists(bdir): + os.makedirs(bdir) + + Utils.writef(os.path.join(bdir, 'Makefile.PL'), """ +use ExtUtils::MakeMaker; +WriteMakefile( + 'NAME' => 'WafTest', + 'EXE_FILES' => [ 'WafTest' ] +); +""") + back = os.path.abspath('.') + os.chdir(bdir) + proc = Utils.subprocess.Popen(['perl', 'Makefile.PL'], + stdout=Utils.subprocess.PIPE, + stderr=Utils.subprocess.PIPE) + (out, err) = proc.communicate() + os.chdir(back) + + ret = (proc.returncode == 0) + if not ret: + conf.end_msg('not found', color='YELLOW') + return + + if section: + man = Utils.readf(os.path.join(bdir,'Makefile')) + m = re.search(r'MAN%sEXT\s+=\s+(\w+)' % section, man) + if not m: + conf.end_msg('not found', color='YELLOW') + return + ext = m.group(1) + conf.end_msg(ext) + return ext + + conf.end_msg('ok') + return True + + +@conf +def CHECK_COMMAND(conf, cmd, msg=None, define=None, on_target=True, boolean=False): + '''run a command and return result''' + if msg is None: + msg = 'Checking %s' % ' '.join(cmd) + conf.COMPOUND_START(msg) + cmd = cmd[:] + if on_target: + cmd.extend(conf.SAMBA_CROSS_ARGS(msg=msg)) + try: + ret = get_string(Utils.cmd_output(cmd)) + except: + conf.COMPOUND_END(False) + return False + if boolean: + conf.COMPOUND_END('ok') + if define: + conf.DEFINE(define, '1') + else: + ret = ret.strip() + conf.COMPOUND_END(ret) + if define: + conf.DEFINE(define, ret, quote=True) + return ret + + +@conf +def CHECK_UNAME(conf): + '''setup SYSTEM_UNAME_* defines''' + ret = True + for v in "sysname machine release version".split(): + if not conf.CHECK_CODE(''' + int printf(const char *format, ...); + struct utsname n; + if (uname(&n) == -1) return -1; + printf("%%s", n.%s); + ''' % v, + define='SYSTEM_UNAME_%s' % v.upper(), + execute=True, + define_ret=True, + quote=True, + headers='sys/utsname.h', + local_include=False, + msg="Checking uname %s type" % v): + ret = False + return ret + +@conf +def CHECK_INLINE(conf): + '''check for the right value for inline''' + conf.COMPOUND_START('Checking for inline') + for i in ['inline', '__inline__', '__inline']: + ret = conf.CHECK_CODE(''' + typedef int foo_t; + static %s foo_t static_foo () {return 0; } + %s foo_t foo () {return 0; }\n''' % (i, i), + define='INLINE_MACRO', + addmain=False, + link=False) + if ret: + if i != 'inline': + conf.DEFINE('inline', i, quote=False) + break + if not ret: + conf.COMPOUND_END(ret) + else: + conf.COMPOUND_END(i) + return ret + +@conf +def CHECK_XSLTPROC_MANPAGES(conf): + '''check if xsltproc can run with the given stylesheets''' + + + if not conf.CONFIG_SET('XSLTPROC'): + conf.find_program('xsltproc', var='XSLTPROC') + if not 
conf.CONFIG_SET('XSLTPROC'): + return False + + s='http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' + conf.CHECK_COMMAND('%s --nonet %s 2> /dev/null' % (conf.env.get_flat('XSLTPROC'), s), + msg='Checking for stylesheet %s' % s, + define='XSLTPROC_MANPAGES', on_target=False, + boolean=True) + if not conf.CONFIG_SET('XSLTPROC_MANPAGES'): + print("A local copy of the docbook.xsl wasn't found on your system;" \ + " consider installing a package like docbook-xsl") + +# +# Determine the standard libpath for the used compiler, +# so we can later use that to filter out these standard +# library paths when some tools like cups-config or +# python-config report standard lib paths with their +# ldflags (-L...) +# +@conf +def CHECK_STANDARD_LIBPATH(conf): + # at least gcc and clang support this: + try: + cmd = conf.env.CC + ['-print-search-dirs'] + out = get_string(Utils.cmd_output(cmd)).split('\n') + except ValueError: + # option not supported by compiler - use a standard list of directories + dirlist = [ '/usr/lib', '/usr/lib64' ] + except: + raise Errors.WafError('Unexpected error running "%s"' % (cmd)) + else: + dirlist = [] + for line in out: + line = line.strip() + if line.startswith("libraries: ="): + dirliststr = line[len("libraries: ="):] + dirlist = [ os.path.normpath(x) for x in dirliststr.split(':') ] + break + + conf.env.STANDARD_LIBPATH = dirlist + diff --git a/buildtools/wafsamba/samba_cross.py b/buildtools/wafsamba/samba_cross.py new file mode 100644 index 0000000..7ec1edc --- /dev/null +++ b/buildtools/wafsamba/samba_cross.py @@ -0,0 +1,175 @@ +# functions for handling cross-compilation + +import os, sys, re, shlex +from waflib import Utils, Logs, Options, Errors, Context +from waflib.Configure import conf +from wafsamba import samba_utils + +real_Popen = None + +ANSWER_UNKNOWN = (254, "") +ANSWER_NO = (1, "") +ANSWER_OK = (0, "") + +cross_answers_incomplete = False + + +def add_answer(ca_file, msg, answer): + '''add an answer to a set of cross answers''' + try: + f = open(ca_file, 'a') + except: + Logs.error("Unable to open cross-answers file %s" % ca_file) + sys.exit(1) + (retcode, retstring) = answer + # if retstring is more than one line then we probably + # don't care about its actual content (the tests should + # yield one-line output in order to comply with the cross-answer + # format) + retstring = retstring.strip() + if len(retstring.split('\n')) > 1: + retstring = '' + answer = (retcode, retstring) + + if answer == ANSWER_OK: + f.write('%s: OK\n' % msg) + elif answer == ANSWER_UNKNOWN: + f.write('%s: UNKNOWN\n' % msg) + elif answer == ANSWER_NO: + f.write('%s: NO\n' % msg) + else: + if retcode == 0: + f.write('%s: "%s"\n' % (msg, retstring)) + else: + f.write('%s: (%d, "%s")\n' % (msg, retcode, retstring)) + f.close() + + +def cross_answer(ca_file, msg): + '''return a (retcode,retstring) tuple from an answers file''' + try: + f = open(ca_file, 'r') + except: + return ANSWER_UNKNOWN + for line in f: + line = line.strip() + if line == '' or line[0] == '#': + continue + if line.find(':') != -1: + a = line.split(':', 1) + thismsg = a[0].strip() + if thismsg != msg: + continue + ans = a[1].strip() + if ans == "OK" or ans == "YES": + f.close() + return ANSWER_OK + elif ans == "UNKNOWN": + f.close() + return ANSWER_UNKNOWN + elif ans == "FAIL" or ans == "NO": + f.close() + return ANSWER_NO + elif ans[0] == '"': + f.close() + return (0, ans.strip('"')) + elif ans[0] == "'": + f.close() + return (0, ans.strip("'")) + else:
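+ # tuple answers record a non-zero exit status together with its + # one-line output, matching what add_answer() writes above; as an + # illustration (values are examples only), an answers file might + # contain entries such as: + # Checking uname sysname type: "Linux" + # Checking for large file support without additional flags: OK + # Checking getconf large file support flags work: (2, "") + m = 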
re.match(r'\(\s*(-?\d+)\s*,\s*\"(.*)\"\s*\)', ans) + if m: + f.close() + return (int(m.group(1)), m.group(2)) + else: + raise Errors.WafError("Bad answer format '%s' in %s" % (line, ca_file)) + f.close() + return ANSWER_UNKNOWN + + +class cross_Popen(Utils.subprocess.Popen): + '''cross-compilation wrapper for Popen''' + def __init__(*k, **kw): + (obj, args) = k + use_answers = False + ans = ANSWER_UNKNOWN + + # Three possibilities: + # 1. Only cross-answers - try the cross-answers file, and if + # there's no corresponding answer, add to the file and mark + # the configure process as unfinished. + # 2. Only cross-execute - get the answer from cross-execute + # 3. Both - try the cross-answers file, and if there is no + # corresponding answer - use cross-execute to get an answer, + # and add that answer to the file. + if '--cross-answers' in args: + # when --cross-answers is set, then change the arguments + # to use the cross answers if available + use_answers = True + i = args.index('--cross-answers') + ca_file = args[i+1] + msg = args[i+2] + ans = cross_answer(ca_file, msg) + + if '--cross-execute' in args and ans == ANSWER_UNKNOWN: + # when --cross-execute is set, then change the arguments + # to use the cross emulator + i = args.index('--cross-execute') + newargs = shlex.split(args[i+1]) + newargs.extend(args[0:i]) + if use_answers: + p = real_Popen(newargs, + stdout=Utils.subprocess.PIPE, + stderr=Utils.subprocess.PIPE, + env=kw.get('env', {})) + ce_out, ce_err = p.communicate() + ans = (p.returncode, samba_utils.get_string(ce_out)) + add_answer(ca_file, msg, ans) + else: + args = newargs + + if use_answers: + if ans == ANSWER_UNKNOWN: + global cross_answers_incomplete + cross_answers_incomplete = True + add_answer(ca_file, msg, ans) + (retcode, retstring) = ans + args = ['/bin/sh', '-c', "printf %%s '%s'; exit %d" % (retstring, retcode)] + real_Popen.__init__(*(obj, args), **kw) + + +@conf +def SAMBA_CROSS_ARGS(conf, msg=None): + '''get test_args to pass when running cross compiled binaries''' + if not conf.env.CROSS_COMPILE: + return [] + + global real_Popen + if real_Popen is None: + real_Popen = Utils.subprocess.Popen + Utils.subprocess.Popen = cross_Popen + Utils.run_process = Utils.run_regular_process + Utils.get_process = Utils.alloc_process_pool = Utils.nada + + ret = [] + + if conf.env.CROSS_EXECUTE: + ret.extend(['--cross-execute', conf.env.CROSS_EXECUTE]) + + if conf.env.CROSS_ANSWERS: + if msg is None: + raise Errors.WafError("Cannot have NULL msg in cross-answers") + ret.extend(['--cross-answers', os.path.join(Context.launch_dir, conf.env.CROSS_ANSWERS), msg]) + + if ret == []: + raise Errors.WafError("Cannot cross-compile without either --cross-execute or --cross-answers") + + return ret + +@conf +def SAMBA_CROSS_CHECK_COMPLETE(conf): + '''check if we have some unanswered questions''' + global cross_answers_incomplete + if conf.env.CROSS_COMPILE and cross_answers_incomplete: + raise Errors.WafError("Cross answers file %s is incomplete" % conf.env.CROSS_ANSWERS) + return True diff --git a/buildtools/wafsamba/samba_deps.py b/buildtools/wafsamba/samba_deps.py new file mode 100644 index 0000000..80379d3 --- /dev/null +++ b/buildtools/wafsamba/samba_deps.py @@ -0,0 +1,1314 @@ +# Samba automatic dependency handling and project rules + +import os, sys, re + +from waflib import Build, Options, Logs, Utils, Errors, Task +from waflib.Logs import debug +from waflib.Configure import conf +from waflib import ConfigSet + +from samba_utils import LOCAL_CACHE, TO_LIST, get_tgt_list, 
unique_list +from samba_autoconf import library_flags + +@conf +def ADD_GLOBAL_DEPENDENCY(ctx, dep): + '''add a dependency for all binaries and libraries''' + if not 'GLOBAL_DEPENDENCIES' in ctx.env: + ctx.env.GLOBAL_DEPENDENCIES = [] + ctx.env.GLOBAL_DEPENDENCIES.append(dep) + + +@conf +def BREAK_CIRCULAR_LIBRARY_DEPENDENCIES(ctx): + '''indicate that circular dependencies between libraries should be broken.''' + ctx.env.ALLOW_CIRCULAR_LIB_DEPENDENCIES = True + + +@conf +def SET_SYSLIB_DEPS(conf, target, deps): + '''setup some implied dependencies for a SYSLIB''' + cache = LOCAL_CACHE(conf, 'SYSLIB_DEPS') + cache[target] = deps + + +def expand_subsystem_deps(bld): + '''expand the reverse dependencies resulting from subsystem + attributes of modules. This walks over the complete list + of declared subsystems and expands the samba_deps_extended list for any + module<->subsystem dependencies''' + + subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + for subsystem_name in subsystem_list: + bld.ASSERT(subsystem_name in targets, "Subsystem target %s not declared" % subsystem_name) + type = targets[subsystem_name] + if type == 'DISABLED' or type == 'EMPTY': + continue + + # for example, + # subsystem_name = dcerpc_server (a subsystem) + # subsystem = dcerpc_server (a subsystem object) + # module_name = rpc_epmapper (a module within the dcerpc_server subsystem) + # module = rpc_epmapper (a module object within the dcerpc_server subsystem) + + subsystem = bld.get_tgen_by_name(subsystem_name) + bld.ASSERT(subsystem is not None, "Unable to find subsystem %s" % subsystem_name) + for d in subsystem_list[subsystem_name]: + module_name = d['TARGET'] + module_type = targets[module_name] + if module_type in ['DISABLED', 'EMPTY']: + continue + bld.ASSERT(subsystem is not None, + "Subsystem target %s for %s (%s) not found" % (subsystem_name, module_name, module_type)) + if module_type in ['SUBSYSTEM']: + # if a module is a plain object type (not a library) then the + # subsystem it is part of needs to have it as a dependency, so targets + # that depend on this subsystem get the modules of that subsystem + subsystem.samba_deps_extended.append(module_name) + subsystem.samba_deps_extended = unique_list(subsystem.samba_deps_extended) + + + +def build_dependencies(self): + '''This builds the dependency list for a target. It runs after all the targets are declared. + + The reason this is not just done in the SAMBA_*() rules is that we have no way of knowing + the full dependency list for a target until we have all of the targets declared. 
+ ''' + + if self.samba_type in ['LIBRARY', 'PLUGIN', 'BINARY', 'PYTHON']: + self.uselib = list(self.final_syslibs) + self.uselib_local = list(self.final_libs) + self.add_objects = list(self.final_objects) + + # extra link flags from pkg_config + libs = self.final_syslibs.copy() + + (cflags, ldflags, cpppath) = library_flags(self, list(libs)) + new_ldflags = getattr(self, 'samba_ldflags', [])[:] + new_ldflags.extend(ldflags) + self.ldflags = new_ldflags + + if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ldflags: + for f in self.env.undefined_ldflags: + self.ldflags.remove(f) + + if getattr(self, 'allow_undefined_symbols', False) and self.env.undefined_ignore_ldflags: + for f in self.env.undefined_ignore_ldflags: + self.ldflags.append(f) + + debug('deps: computed dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', + self.sname, self.uselib, self.uselib_local, self.add_objects) + + if self.samba_type in ['SUBSYSTEM', 'BUILTIN']: + # this is needed for the cflags of libs that come from pkg_config + self.uselib = list(self.final_syslibs) + self.uselib.extend(list(self.direct_syslibs)) + for lib in self.final_libs: + t = self.bld.get_tgen_by_name(lib) + self.uselib.extend(list(t.final_syslibs)) + self.uselib = unique_list(self.uselib) + + if getattr(self, 'uselib', None): + up_list = [] + for l in self.uselib: + up_list.append(l.upper()) + self.uselib = up_list + + +def build_includes(self): + '''This builds the right set of includes for a target. + + One tricky part of this is that the includes= attribute for a + target needs to use paths which are relative to that target's + declaration directory (which we can get at via t.path). + + The way this works is the includes list gets added as + samba_includes in the main build task declaration. Then this + function runs after all of the tasks are declared, and it + processes the samba_includes attribute to produce an includes= + attribute + ''' + + if getattr(self, 'samba_includes', None) is None: + return + + bld = self.bld + + inc_deps = includes_objects(bld, self, set(), {}) + + includes = [] + + # maybe add local includes + if getattr(self, 'local_include', True) and getattr(self, 'local_include_first', True): + includes.append('.') + + includes.extend(self.samba_includes_extended) + + if 'EXTRA_INCLUDES' in bld.env and getattr(self, 'global_include', True): + includes.extend(bld.env['EXTRA_INCLUDES']) + + includes.append('#') + + inc_set = set() + inc_abs = [] + + for d in inc_deps: + t = bld.get_tgen_by_name(d) + bld.ASSERT(t is not None, "Unable to find dependency %s for %s" % (d, self.sname)) + inclist = getattr(t, 'samba_includes_extended', [])[:] + if getattr(t, 'local_include', True): + inclist.append('.') + if inclist == []: + continue + tpath = t.samba_abspath + for inc in inclist: + npath = tpath + '/' + inc + if not npath in inc_set: + inc_abs.append(npath) + inc_set.add(npath) + + mypath = self.path.abspath(bld.env) + for inc in inc_abs: + relpath = os.path.relpath(inc, mypath) + includes.append(relpath) + + if getattr(self, 'local_include', True) and not getattr(self, 'local_include_first', True): + includes.append('.') + + # now transform the includes list to be relative to the top directory + # which is represented by '#' in waf. 
This allows waf to cache the + # includes lists more efficiently + includes_top = [] + for i in includes: + if i[0] == '#': + # some are already top based + includes_top.append(i) + continue + absinc = os.path.join(self.path.abspath(), i) + relinc = os.path.relpath(absinc, self.bld.srcnode.abspath()) + includes_top.append('#' + relinc) + + self.includes = unique_list(includes_top) + debug('deps: includes for target %s: includes=%s', + self.sname, self.includes) + + +def add_init_functions(self): + '''This builds the right set of init functions''' + + bld = self.bld + + subsystems = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') + + # cope with the separated object lists from BINARY and LIBRARY targets + sname = self.sname + if sname.endswith('.objlist'): + sname = sname[0:-8] + + modules = [] + if sname in subsystems: + modules.append(sname) + + m = getattr(self, 'samba_modules', None) + if m is not None: + modules.extend(TO_LIST(m)) + + m = getattr(self, 'samba_subsystem', None) + if m is not None: + modules.append(m) + + if 'pyembed' in self.features: + return + + sentinel = getattr(self, 'init_function_sentinel', 'NULL') + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + cflags = getattr(self, 'samba_cflags', [])[:] + + if modules == []: + sname = sname.replace('-','_') + sname = sname.replace('.','_') + sname = sname.replace('/','_') + cflags.append('-DSTATIC_%s_MODULES=%s' % (sname, sentinel)) + if sentinel == 'NULL': + proto = "extern void __%s_dummy_module_proto(void)" % (sname) + cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (sname, proto)) + self.cflags = cflags + return + + for m in modules: + bld.ASSERT(m in subsystems, + "No init_function defined for module '%s' in target '%s'" % (m, self.sname)) + init_fn_list = [] + for d in subsystems[m]: + if targets[d['TARGET']] != 'DISABLED': + init_fn_list.append(d['INIT_FUNCTION']) + if init_fn_list == []: + cflags.append('-DSTATIC_%s_MODULES=%s' % (m, sentinel)) + if sentinel == 'NULL': + proto = "extern void __%s_dummy_module_proto(void)" % (m) + cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto)) + else: + cflags.append('-DSTATIC_%s_MODULES=%s' % (m, ','.join(init_fn_list) + ',' + sentinel)) + proto = "".join('_MODULE_PROTO(%s)' % f for f in init_fn_list) +\ + "extern void __%s_dummy_module_proto(void)" % (m) + cflags.append('-DSTATIC_%s_MODULES_PROTO=%s' % (m, proto)) + self.cflags = cflags + + +def check_duplicate_sources(bld, tgt_list): + '''see if we are compiling the same source file more than once''' + + debug('deps: checking for duplicate sources') + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + for t in tgt_list: + source_list = TO_LIST(getattr(t, 'source', '')) + tpath = os.path.normpath(os.path.relpath(t.path.abspath(bld.env), t.env.BUILD_DIRECTORY + '/default')) + obj_sources = set() + for s in source_list: + if not isinstance(s, str): + print('strange path in check_duplicate_sources %r' % s) + s = s.abspath() + p = os.path.normpath(os.path.join(tpath, s)) + if p in obj_sources: + Logs.error("ERROR: source %s appears twice in target '%s'" % (p, t.sname)) + sys.exit(1) + obj_sources.add(p) + t.samba_source_set = obj_sources + + subsystems = {} + + # build a list of targets that each source file is part of + for t in tgt_list: + if not targets[t.sname] in [ 'LIBRARY', 'PLUGIN', 'BINARY', 'PYTHON' ]: + continue + for obj in t.add_objects: + t2 = t.bld.get_tgen_by_name(obj) + source_set = getattr(t2, 'samba_source_set', set()) + for s in source_set: + if not s in subsystems: + subsystems[s] = {} + if not t.sname in subsystems[s]: + 
subsystems[s][t.sname] = [] + subsystems[s][t.sname].append(t2.sname) + + for s in subsystems: + if len(subsystems[s]) > 1 and Options.options.SHOW_DUPLICATES: + Logs.warn("WARNING: source %s is in more than one target: %s" % (s, subsystems[s].keys())) + for tname in subsystems[s]: + if len(subsystems[s][tname]) > 1: + raise Errors.WafError("ERROR: source %s is in more than one subsystem of target '%s': %s" % (s, tname, subsystems[s][tname])) + + return True + +def check_group_ordering(bld, tgt_list): + '''see if we have any dependencies that violate the group ordering + + It is an error for a target to depend on a target from a later + build group + ''' + + def group_name(g): + tm = bld.task_manager + return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] + + for g in bld.task_manager.groups: + gname = group_name(g) + for t in g.tasks_gen: + t.samba_group = gname + + grp_map = {} + idx = 0 + for g in bld.task_manager.groups: + name = group_name(g) + grp_map[name] = idx + idx += 1 + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + ret = True + for t in tgt_list: + tdeps = getattr(t, 'add_objects', []) + getattr(t, 'uselib_local', []) + for d in tdeps: + t2 = bld.get_tgen_by_name(d) + if t2 is None: + continue + map1 = grp_map[t.samba_group] + map2 = grp_map[t2.samba_group] + + if map2 > map1: + Logs.error("Target %r in build group %r depends on target %r from later build group %r" % ( + t.sname, t.samba_group, t2.sname, t2.samba_group)) + ret = False + + return ret +Build.BuildContext.check_group_ordering = check_group_ordering + +def show_final_deps(bld, tgt_list): + '''show the final dependencies for all targets''' + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + for t in tgt_list: + if not targets[t.sname] in ['LIBRARY', 'PLUGIN', 'BINARY', 'PYTHON', 'SUBSYSTEM', 'BUILTIN']: + continue + debug('deps: final dependencies for target %s: uselib=%s uselib_local=%s add_objects=%s', + t.sname, t.uselib, getattr(t, 'uselib_local', []), getattr(t, 'add_objects', [])) + + +def add_samba_attributes(bld, tgt_list): + '''ensure a target has the required samba attributes''' + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + for t in tgt_list: + if t.name != '': + t.sname = t.name + else: + t.sname = t.target + t.samba_type = targets[t.sname] + t.samba_abspath = t.path.abspath(bld.env) + t.samba_deps_extended = t.samba_deps[:] + t.samba_includes_extended = TO_LIST(t.samba_includes)[:] + t.cflags = getattr(t, 'samba_cflags', '') + +def replace_builtin_subsystem_deps(bld, tgt_list): + '''replace dependencies based on builtin subsystems/libraries + + ''' + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + # If either the target or the dependency require builtin linking + # we should replace the dependency + for t in tgt_list: + t_require_builtin_deps = getattr(t, 'samba_require_builtin_deps', False) + if t_require_builtin_deps: + debug("deps: target %s: requires builtin dependencies..." % (t.sname)) + else: + debug("deps: target %s: does not require builtin dependencies..." 
% (t.sname)) + + replacing = {} + + for dep in t.samba_deps_extended: + bld.ASSERT(dep in targets, "target %s: dependency target %s not declared" % (t.sname, dep)) + dtype = targets[dep] + bld.ASSERT(dtype != 'BUILTIN', "target %s: dependency target %s is BUILTIN" % (t.sname, dep)) + bld.ASSERT(dtype != 'PLUGIN', "target %s: dependency target %s is PLUGIN" % (t.sname, dep)) + if dtype not in ['SUBSYSTEM', 'LIBRARY']: + debug("deps: target %s: keep %s dependency %s" % (t.sname, dtype, dep)) + continue + dt = bld.get_tgen_by_name(dep) + bld.ASSERT(dt is not None, "target %s: dependency target %s not found by name" % (t.sname, dep)) + dt_require_builtin_deps = getattr(dt, 'samba_require_builtin_deps', False) + if not dt_require_builtin_deps and not t_require_builtin_deps: + # both target and dependency don't require builtin linking + continue + sdt = getattr(dt, 'samba_builtin_subsystem', None) + if not t_require_builtin_deps: + if sdt is None: + debug("deps: target %s: dependency %s requires builtin deps only" % (t.sname, dep)) + continue + debug("deps: target %s: dependency %s requires builtin linking" % (t.sname, dep)) + bld.ASSERT(sdt is not None, "target %s: dependency target %s is missing samba_builtin_subsystem" % (t.sname, dep)) + sdep = sdt.sname + bld.ASSERT(sdep in targets, "target %s: builtin dependency target %s (from %s) not declared" % (t.sname, sdep, dep)) + sdt = targets[sdep] + bld.ASSERT(sdt == 'BUILTIN', "target %s: builtin dependency target %s (from %s) is not BUILTIN" % (t.sname, sdep, dep)) + replacing[dep] = sdep + + for i in range(len(t.samba_deps_extended)): + dep = t.samba_deps_extended[i] + if dep in replacing: + sdep = replacing[dep] + debug("deps: target %s: replacing dependency %s with builtin subsystem %s" % (t.sname, dep, sdep)) + t.samba_deps_extended[i] = sdep + +def replace_grouping_libraries(bld, tgt_list): + '''replace dependencies based on grouping libraries + + If a library is marked as a grouping library, then any target that + depends on a subsystem that is part of that grouping library gets + that dependency replaced with a dependency on the grouping library + ''' + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + grouping = {} + + # find our list of grouping libraries, mapped from the subsystems they depend on + for t in tgt_list: + if not getattr(t, 'grouping_library', False): + continue + for dep in t.samba_deps_extended: + bld.ASSERT(dep in targets, "grouping library target %s not declared in %s" % (dep, t.sname)) + if targets[dep] == 'SUBSYSTEM': + grouping[dep] = t.sname + + # now replace any dependencies on elements of grouping libraries + for t in tgt_list: + for i in range(len(t.samba_deps_extended)): + dep = t.samba_deps_extended[i] + if dep in grouping: + if t.sname != grouping[dep]: + debug("deps: target %s: replacing dependency %s with grouping library %s" % (t.sname, dep, grouping[dep])) + t.samba_deps_extended[i] = grouping[dep] + + + +def build_direct_deps(bld, tgt_list): + '''build the direct_objects and direct_libs sets for each target''' + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + syslib_deps = LOCAL_CACHE(bld, 'SYSLIB_DEPS') + + global_deps = bld.env.GLOBAL_DEPENDENCIES + global_deps_exclude = set() + for dep in global_deps: + t = bld.get_tgen_by_name(dep) + for d in t.samba_deps: + # prevent loops from the global dependencies list + global_deps_exclude.add(d) + global_deps_exclude.add(d + '.objlist') + + for t in tgt_list: + t.direct_objects = set() + t.direct_libs = set() + t.direct_syslibs = set() + deps = 
t.samba_deps_extended[:] + if getattr(t, 'samba_use_global_deps', False) and not t.sname in global_deps_exclude: + deps.extend(global_deps) + for d in deps: + if d == t.sname: continue + if not d in targets: + Logs.error("Unknown dependency '%s' in '%s'" % (d, t.sname)) + sys.exit(1) + if targets[d] in [ 'EMPTY', 'DISABLED' ]: + continue + if targets[d] == 'PYTHON' and targets[t.sname] != 'PYTHON' and t.sname.find('.objlist') == -1: + # this check should be more restrictive, but for now we have pidl-generated python + # code that directly depends on other python modules + Logs.error('ERROR: Target %s has dependency on python module %s' % (t.sname, d)) + sys.exit(1) + if targets[d] == 'SYSLIB': + t.direct_syslibs.add(d) + if d in syslib_deps: + for implied in TO_LIST(syslib_deps[d]): + if targets[implied] == 'SUBSYSTEM': + it = bld.get_tgen_by_name(implied) + sit = getattr(it, 'samba_builtin_subsystem', None) + if sit: + implied = sit.sname + if targets[implied] == 'BUILTIN': + t.direct_objects.add(implied) + elif targets[implied] == 'SYSLIB': + t.direct_syslibs.add(implied) + elif targets[implied] in ['LIBRARY', 'MODULE']: + t.direct_libs.add(implied) + else: + Logs.error('Implied dependency %s in %s is of type %s' % ( + implied, t.sname, targets[implied])) + sys.exit(1) + continue + t2 = bld.get_tgen_by_name(d) + if t2 is None: + Logs.error("no task %s of type %s in %s" % (d, targets[d], t.sname)) + sys.exit(1) + if t2.samba_type in [ 'LIBRARY', 'MODULE' ]: + t.direct_libs.add(d) + elif t2.samba_type in [ 'SUBSYSTEM', 'BUILTIN', 'ASN1', 'PYTHON' ]: + t.direct_objects.add(d) + elif t2.samba_type in [ 'PLUGIN' ]: + Logs.error('Implicit dependency %s in %s is of type %s' % ( + d, t.sname, t2.samba_type)) + sys.exit(1) + + debug('deps: built direct dependencies') + + +def dependency_loop(loops, t, target): + '''add a dependency loop to the loops dictionary''' + if t.sname == target: + return + if not target in loops: + loops[target] = set() + if not t.sname in loops[target]: + loops[target].add(t.sname) + + +def indirect_libs(bld, t, chain, loops): + '''recursively calculate the indirect library dependencies for a target + + An indirect library is a library that results from a dependency on + a subsystem + ''' + + ret = getattr(t, 'indirect_libs', None) + if ret is not None: + return ret + + ret = set() + for obj in t.direct_objects: + if obj in chain: + dependency_loop(loops, t, obj) + continue + chain.add(obj) + t2 = bld.get_tgen_by_name(obj) + r2 = indirect_libs(bld, t2, chain, loops) + chain.remove(obj) + ret = ret.union(t2.direct_libs) + ret = ret.union(r2) + + for obj in indirect_objects(bld, t, set(), loops): + if obj in chain: + dependency_loop(loops, t, obj) + continue + chain.add(obj) + t2 = bld.get_tgen_by_name(obj) + r2 = indirect_libs(bld, t2, chain, loops) + chain.remove(obj) + ret = ret.union(t2.direct_libs) + ret = ret.union(r2) + + t.indirect_libs = ret + + return ret + + +def indirect_objects(bld, t, chain, loops): + '''recursively calculate the indirect object dependencies for a target + + indirect objects are the set of objects from expanding the + subsystem dependencies + ''' + + ret = getattr(t, 'indirect_objects', None) + if ret is not None: return ret + + ret = set() + for lib in t.direct_objects: + if lib in chain: + dependency_loop(loops, t, lib) + continue + chain.add(lib) + t2 = bld.get_tgen_by_name(lib) + r2 = indirect_objects(bld, t2, chain, loops) + chain.remove(lib) + ret = ret.union(t2.direct_objects) + ret = ret.union(r2) + + t.indirect_objects = ret + return 
ret + + +def extended_objects(bld, t, chain): + '''recursively calculate the extended object dependencies for a target + + extended objects are the union of: + - direct objects + - indirect objects + - direct and indirect objects of all direct and indirect libraries + ''' + + ret = getattr(t, 'extended_objects', None) + if ret is not None: return ret + + ret = set() + ret = ret.union(t.final_objects) + + for lib in t.final_libs: + if lib in chain: + continue + t2 = bld.get_tgen_by_name(lib) + chain.add(lib) + r2 = extended_objects(bld, t2, chain) + chain.remove(lib) + ret = ret.union(t2.final_objects) + ret = ret.union(r2) + + t.extended_objects = ret + return ret + + +def includes_objects(bld, t, chain, inc_loops): + '''recursively calculate the includes object dependencies for a target + + includes dependencies come from either library or object dependencies + ''' + ret = getattr(t, 'includes_objects', None) + if ret is not None: + return ret + + ret = t.direct_objects.copy() + ret = ret.union(t.direct_libs) + + for obj in t.direct_objects: + if obj in chain: + dependency_loop(inc_loops, t, obj) + continue + chain.add(obj) + t2 = bld.get_tgen_by_name(obj) + r2 = includes_objects(bld, t2, chain, inc_loops) + chain.remove(obj) + ret = ret.union(t2.direct_objects) + ret = ret.union(r2) + + for lib in t.direct_libs: + if lib in chain: + dependency_loop(inc_loops, t, lib) + continue + chain.add(lib) + t2 = bld.get_tgen_by_name(lib) + if t2 is None: + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + Logs.error('Target %s of type %s not found in direct_libs for %s' % ( + lib, targets[lib], t.sname)) + sys.exit(1) + r2 = includes_objects(bld, t2, chain, inc_loops) + chain.remove(lib) + ret = ret.union(t2.direct_objects) + ret = ret.union(r2) + + t.includes_objects = ret + return ret + + +def break_dependency_loops(bld, tgt_list): + '''find and break dependency loops''' + loops = {} + inc_loops = {} + + # build up the list of loops + for t in tgt_list: + indirect_objects(bld, t, set(), loops) + indirect_libs(bld, t, set(), loops) + includes_objects(bld, t, set(), inc_loops) + + # break the loops + for t in tgt_list: + if t.sname in loops: + for attr in ['direct_objects', 'indirect_objects', 'direct_libs', 'indirect_libs']: + objs = getattr(t, attr, set()) + setattr(t, attr, objs.difference(loops[t.sname])) + + for loop in loops: + debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) + + for loop in inc_loops: + debug('deps: Found include loops for target %s : %s', loop, inc_loops[loop]) + + # expand the loops mapping by one level + for loop in loops.copy(): + for tgt in loops[loop]: + if tgt in loops: + loops[loop] = loops[loop].union(loops[tgt]) + + for loop in inc_loops.copy(): + for tgt in inc_loops[loop]: + if tgt in inc_loops: + inc_loops[loop] = inc_loops[loop].union(inc_loops[tgt]) + + + # expand indirect subsystem and library loops + for loop in loops.copy(): + t = bld.get_tgen_by_name(loop) + if t.samba_type in ['SUBSYSTEM', 'BUILTIN']: + loops[loop] = loops[loop].union(t.indirect_objects) + loops[loop] = loops[loop].union(t.direct_objects) + if t.samba_type in ['LIBRARY', 'PLUGIN', 'PYTHON']: + loops[loop] = loops[loop].union(t.indirect_libs) + loops[loop] = loops[loop].union(t.direct_libs) + if loop in loops[loop]: + loops[loop].remove(loop) + + # expand indirect includes loops + for loop in inc_loops.copy(): + t = bld.get_tgen_by_name(loop) + inc_loops[loop] = inc_loops[loop].union(t.includes_objects) + if loop in inc_loops[loop]: + inc_loops[loop].remove(loop) + + # 
add in the replacement dependencies + for t in tgt_list: + for loop in loops: + for attr in ['indirect_objects', 'indirect_libs']: + objs = getattr(t, attr, set()) + if loop in objs: + diff = loops[loop].difference(objs) + if t.sname in diff: + diff.remove(t.sname) + if diff: + debug('deps: Expanded target %s of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) + objs = objs.union(diff) + setattr(t, attr, objs) + + for loop in inc_loops: + objs = getattr(t, 'includes_objects', set()) + if loop in objs: + diff = inc_loops[loop].difference(objs) + if t.sname in diff: + diff.remove(t.sname) + if diff: + debug('deps: Expanded target %s includes of type %s from loop %s by %s', t.sname, t.samba_type, loop, diff) + objs = objs.union(diff) + setattr(t, 'includes_objects', objs) + + +def reduce_objects(bld, tgt_list): + '''reduce objects by looking for indirect object dependencies''' + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + rely_on = {} + + for t in tgt_list: + t.extended_objects = None + + changed = False + + for type in ['BINARY', 'PYTHON', 'LIBRARY', 'PLUGIN']: + for t in tgt_list: + if t.samba_type != type: continue + # if we will indirectly link to a target then we don't need it + new = t.final_objects.copy() + for l in t.final_libs: + t2 = bld.get_tgen_by_name(l) + t2_obj = extended_objects(bld, t2, set()) + dup = new.intersection(t2_obj) + if t.sname in rely_on: + dup = dup.difference(rely_on[t.sname]) + if dup: + # Do not remove duplicates of BUILTINS + for d in iter(dup.copy()): + dtype = targets[d] + if dtype == 'BUILTIN': + debug('deps: BUILTIN SKIP: removing dups from %s of type %s: %s also in %s %s', + t.sname, t.samba_type, d, t2.samba_type, l) + dup.remove(d) + if len(dup) == 0: + continue + + debug('deps: removing dups from %s of type %s: %s also in %s %s', + t.sname, t.samba_type, dup, t2.samba_type, l) + new = new.difference(dup) + changed = True + if not l in rely_on: + rely_on[l] = set() + rely_on[l] = rely_on[l].union(dup) + for n in iter(new.copy()): + # if we got the builtin version as well + # as the native one, we keep using the + # builtin one and remove the rest. + # Otherwise our check_duplicate_sources() + # checks would trigger! 
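+ # (for example, if a hypothetical foo.builtin.objlist and + # foo.objlist both ended up in 'new', only the builtin variant + # would be kept; 'foo' is purely illustrative)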
+ if n.endswith('.builtin.objlist'): + unused = n.replace('.builtin.objlist', '.objlist') + if unused in new: + new.remove(unused) + unused = n.replace('.builtin.objlist', '') + if unused in new: + new.remove(unused) + t.final_objects = new + + if not changed: + return False + + # add back in any objects that were relied upon by the reduction rules + for r in rely_on: + t = bld.get_tgen_by_name(r) + t.final_objects = t.final_objects.union(rely_on[r]) + + return True + + +def show_library_loop(bld, lib1, lib2, path, seen): + '''show the detailed path of a library loop between lib1 and lib2''' + + t = bld.get_tgen_by_name(lib1) + if not lib2 in getattr(t, 'final_libs', set()): + return + + for d in t.samba_deps_extended: + if d in seen: + continue + seen.add(d) + path2 = path + '=>' + d + if d == lib2: + Logs.warn('library loop path: ' + path2) + return + show_library_loop(bld, d, lib2, path2, seen) + seen.remove(d) + + +def calculate_final_deps(bld, tgt_list, loops): + '''calculate the final library and object dependencies''' + for t in tgt_list: + # start with the maximum possible list + t.final_libs = t.direct_libs.union(indirect_libs(bld, t, set(), loops)) + t.final_objects = t.direct_objects.union(indirect_objects(bld, t, set(), loops)) + + for t in tgt_list: + # don't depend on ourselves + if t.sname in t.final_libs: + t.final_libs.remove(t.sname) + if t.sname in t.final_objects: + t.final_objects.remove(t.sname) + + # handle any non-shared binaries + for t in tgt_list: + if t.samba_type == 'BINARY' and bld.NONSHARED_BINARY(t.sname): + subsystem_list = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + # replace lib deps with objlist deps + for l in t.final_libs: + objname = l + '.objlist' + t2 = bld.get_tgen_by_name(objname) + if t2 is None: + Logs.error('ERROR: subsystem %s not found' % objname) + sys.exit(1) + t.final_objects.add(objname) + t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) + if l in subsystem_list: + # its a subsystem - we also need the contents of any modules + for d in subsystem_list[l]: + module_name = d['TARGET'] + if targets[module_name] == 'LIBRARY': + objname = module_name + '.objlist' + elif targets[module_name] == 'SUBSYSTEM': + objname = module_name + else: + continue + t2 = bld.get_tgen_by_name(objname) + if t2 is None: + Logs.error('ERROR: subsystem %s not found' % objname) + sys.exit(1) + t.final_objects.add(objname) + t.final_objects = t.final_objects.union(extended_objects(bld, t2, set())) + t.final_libs = set() + + # find any library loops + for t in tgt_list: + if t.samba_type in ['LIBRARY', 'PYTHON']: + for l in t.final_libs.copy(): + t2 = bld.get_tgen_by_name(l) + if t.sname in t2.final_libs: + if getattr(bld.env, "ALLOW_CIRCULAR_LIB_DEPENDENCIES", False): + # we could break this in either direction. 
If one of the libraries + has a version number, and will be distributed publicly, then + we should make it the lower level library in the DAG + Logs.warn('deps: removing library loop %s from %s' % (t.sname, t2.sname)) + dependency_loop(loops, t, t2.sname) + t2.final_libs.remove(t.sname) + else: + Logs.error('ERROR: circular library dependency between %s and %s' + % (t.sname, t2.sname)) + show_library_loop(bld, t.sname, t2.sname, t.sname, set()) + show_library_loop(bld, t2.sname, t.sname, t2.sname, set()) + sys.exit(1) + + for loop in loops: + debug('deps: Found dependency loops for target %s : %s', loop, loops[loop]) + + # we now need to make corrections for any library loops we broke up + # any target that depended on the target of the loop and doesn't + # depend on the source of the loop needs to get the loop source added + for type in ['BINARY','PYTHON','LIBRARY','PLUGIN']: + for t in tgt_list: + if t.samba_type != type: continue + for loop in loops: + if loop in t.final_libs: + diff = loops[loop].difference(t.final_libs) + if t.sname in diff: + diff.remove(t.sname) + # make sure we don't recreate the loop again! + for d in diff.copy(): + t2 = bld.get_tgen_by_name(d) + if t2.samba_type == 'LIBRARY': + if t.sname in t2.final_libs: + debug('deps: removing expansion %s from %s', d, t.sname) + diff.remove(d) + if diff: + debug('deps: Expanded target %s by loop %s libraries (loop %s) %s', t.sname, loop, + loops[loop], diff) + t.final_libs = t.final_libs.union(diff) + + # remove objects that are also available in linked libs + count = 0 + while reduce_objects(bld, tgt_list): + count += 1 + if count > 100: + Logs.warn("WARNING: Unable to remove all inter-target object duplicates") + break + debug('deps: Object reduction took %u iterations', count) + + # add in any syslib dependencies + for t in tgt_list: + if not t.samba_type in ['BINARY','PYTHON','LIBRARY','PLUGIN','SUBSYSTEM','BUILTIN']: + continue + syslibs = set() + for d in t.final_objects: + t2 = bld.get_tgen_by_name(d) + syslibs = syslibs.union(t2.direct_syslibs) + # this adds the indirect syslibs as well, which may not be needed + # depending on the linker flags + for d in t.final_libs: + t2 = bld.get_tgen_by_name(d) + syslibs = syslibs.union(t2.direct_syslibs) + t.final_syslibs = syslibs + + + # find any unresolved library loops + lib_loop_error = False + for t in tgt_list: + if t.samba_type in ['LIBRARY', 'PLUGIN', 'PYTHON']: + for l in t.final_libs.copy(): + t2 = bld.get_tgen_by_name(l) + if t.sname in t2.final_libs: + Logs.error('ERROR: Unresolved library loop %s from %s' % (t.sname, t2.sname)) + lib_loop_error = True + if lib_loop_error: + sys.exit(1) + + debug('deps: removed duplicate dependencies') + + +def show_dependencies(bld, target, seen): + '''recursively show the dependencies of target''' + + if target in seen: + return + + t = bld.get_tgen_by_name(target) + if t is None: + Logs.error("ERROR: Unable to find target '%s'" % target) + sys.exit(1) + + Logs.info('%s(OBJECTS): %s' % (target, t.direct_objects)) + Logs.info('%s(LIBS): %s' % (target, t.direct_libs)) + Logs.info('%s(SYSLIBS): %s' % (target, t.direct_syslibs)) + + seen.add(target) + + for t2 in t.direct_objects: + show_dependencies(bld, t2, seen) + + +def show_object_duplicates(bld, tgt_list): + '''show a list of object files that are included in more than + one library or binary''' + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + used_by = {} + + Logs.info("showing duplicate objects") +
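+ # map each object file to the set of libraries/binaries that link it + for t in 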
tgt_list: + if not targets[t.sname] in [ 'LIBRARY', 'PYTHON' ]: + continue + for n in getattr(t, 'final_objects', set()): + t2 = bld.get_tgen_by_name(n) + if not n in used_by: + used_by[n] = set() + used_by[n].add(t.sname) + + for n in used_by: + if len(used_by[n]) > 1: + Logs.info("target '%s' is used by %s" % (n, used_by[n])) + + Logs.info("showing indirect dependency counts (sorted by count)") + + def indirect_count(t): + return len(t.indirect_objects) + + sorted_list = sorted(tgt_list, key=indirect_count, reverse=True) + for t in sorted_list: + if len(t.indirect_objects) > 1: + Logs.info("%s depends on %u indirect objects" % (t.sname, len(t.indirect_objects))) + + +###################################################################### +# this provides a way to save our dependency calculations between runs +savedeps_version = 3 +savedeps_inputs = ['samba_deps', 'samba_includes', 'local_include', 'local_include_first', 'samba_cflags', + 'source', 'grouping_library', 'samba_ldflags', 'allow_undefined_symbols', + 'use_global_deps', 'global_include' ] +savedeps_outputs = ['uselib', 'uselib_local', 'add_objects', 'includes', + 'cflags', 'ldflags', 'samba_deps_extended', 'final_libs'] +savedeps_outenv = ['INC_PATHS'] +savedeps_envvars = ['NONSHARED_BINARIES', 'GLOBAL_DEPENDENCIES', 'EXTRA_CFLAGS', 'EXTRA_LDFLAGS', 'EXTRA_INCLUDES' ] +savedeps_caches = ['GLOBAL_DEPENDENCIES', 'TARGET_TYPE', 'INIT_FUNCTIONS', 'SYSLIB_DEPS'] +savedeps_files = ['buildtools/wafsamba/samba_deps.py'] + +def save_samba_deps(bld, tgt_list): + '''save the dependency calculations between builds, to make + further builds faster''' + denv = ConfigSet.ConfigSet() + + denv.version = savedeps_version + denv.savedeps_inputs = savedeps_inputs + denv.savedeps_outputs = savedeps_outputs + denv.input = {} + denv.output = {} + denv.outenv = {} + denv.caches = {} + denv.envvar = {} + denv.files = {} + + for f in savedeps_files: + denv.files[f] = os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime + + for c in savedeps_caches: + denv.caches[c] = LOCAL_CACHE(bld, c) + + for e in savedeps_envvars: + denv.envvar[e] = bld.env[e] + + for t in tgt_list: + # save all the input attributes for each target + tdeps = {} + for attr in savedeps_inputs: + v = getattr(t, attr, None) + if v is not None: + tdeps[attr] = v + if tdeps != {}: + denv.input[t.sname] = tdeps + + # save all the output attributes for each target + tdeps = {} + for attr in savedeps_outputs: + v = getattr(t, attr, None) + if v is not None: + tdeps[attr] = v + if tdeps != {}: + denv.output[t.sname] = tdeps + + tdeps = {} + for attr in savedeps_outenv: + if attr in t.env: + tdeps[attr] = t.env[attr] + if tdeps != {}: + denv.outenv[t.sname] = tdeps + + depsfile = os.path.join(bld.cache_dir, "sambadeps") + denv.store_fast(depsfile) + + + +def load_samba_deps(bld, tgt_list): + '''load a previous set of build dependencies if possible''' + depsfile = os.path.join(bld.cache_dir, "sambadeps") + denv = ConfigSet.ConfigSet() + try: + debug('deps: checking saved dependencies') + denv.load_fast(depsfile) + if (denv.version != savedeps_version or + denv.savedeps_inputs != savedeps_inputs or + denv.savedeps_outputs != savedeps_outputs): + return False + except Exception: + return False + + # check if critical files have changed + for f in savedeps_files: + if f not in denv.files: + return False + if denv.files[f] != os.stat(os.path.join(bld.srcnode.abspath(), f)).st_mtime: + return False + + # check if caches are the same + for c in savedeps_caches: + if c not in denv.caches or 
denv.caches[c] != LOCAL_CACHE(bld, c): + return False + + # check if the environment variables are the same + for e in savedeps_envvars: + if e not in denv.envvar or denv.envvar[e] != bld.env[e]: + return False + + # check inputs are the same + for t in tgt_list: + tdeps = {} + for attr in savedeps_inputs: + v = getattr(t, attr, None) + if v is not None: + tdeps[attr] = v + if t.sname in denv.input: + olddeps = denv.input[t.sname] + else: + olddeps = {} + if tdeps != olddeps: + #print '%s: \ntdeps=%s \nodeps=%s' % (t.sname, tdeps, olddeps) + return False + + # put outputs in place + for t in tgt_list: + if not t.sname in denv.output: continue + tdeps = denv.output[t.sname] + for a in tdeps: + setattr(t, a, tdeps[a]) + + # put output env vars in place + for t in tgt_list: + if not t.sname in denv.outenv: continue + tdeps = denv.outenv[t.sname] + for a in tdeps: + t.env[a] = tdeps[a] + + debug('deps: loaded saved dependencies') + return True + + +def generate_clangdb(bld): + classes = [] + for x in ('c', 'cxx'): + cls = Task.classes.get(x) + if cls: + classes.append(cls) + task_classes = tuple(classes) + + tasks = [] + for g in bld.groups: + for tg in g: + if isinstance(tg, Task.Task): + lst = [tg] + else: + lst = tg.tasks + for task in lst: + try: + task.last_cmd + except AttributeError: + continue + if isinstance(task, task_classes): + tasks.append(task) + if len(tasks) == 0: + return + + database_file = bld.bldnode.make_node('compile_commands.json') + Logs.info('Build commands will be stored in %s', + database_file.path_from(bld.path)) + try: + root = database_file.read_json() + except IOError: + root = [] + clang_db = dict((x['file'], x) for x in root) + for task in tasks: + f_node = task.inputs[0] + cmd = task.last_cmd + filename = f_node.path_from(task.get_cwd()) + entry = { + "directory": task.get_cwd().abspath(), + "arguments": cmd, + "file": filename, + } + clang_db[filename] = entry + root = list(clang_db.values()) + database_file.write_json(root) + + +def check_project_rules(bld): + '''check the project rules - ensuring the targets are sane''' + + loops = {} + inc_loops = {} + + tgt_list = get_tgt_list(bld) + + add_samba_attributes(bld, tgt_list) + + force_project_rules = (Options.options.SHOWDEPS or + Options.options.SHOW_DUPLICATES) + + if not force_project_rules and load_samba_deps(bld, tgt_list): + return + + timer = Utils.Timer() + + bld.new_rules = True + Logs.info("Checking project rules ...") + + debug('deps: project rules checking started') + + replace_builtin_subsystem_deps(bld, tgt_list) + + debug("deps: replace_builtin_subsystem_deps: %s" % str(timer)) + + expand_subsystem_deps(bld) + + debug("deps: expand_subsystem_deps: %s" % str(timer)) + + replace_grouping_libraries(bld, tgt_list) + + debug("deps: replace_grouping_libraries: %s" % str(timer)) + + build_direct_deps(bld, tgt_list) + + debug("deps: build_direct_deps: %s" % str(timer)) + + break_dependency_loops(bld, tgt_list) + + debug("deps: break_dependency_loops: %s" % str(timer)) + + if Options.options.SHOWDEPS: + show_dependencies(bld, Options.options.SHOWDEPS, set()) + + calculate_final_deps(bld, tgt_list, loops) + + debug("deps: calculate_final_deps: %s" % str(timer)) + + if Options.options.SHOW_DUPLICATES: + show_object_duplicates(bld, tgt_list) + + # run the various attribute generators + for f in [ build_dependencies, build_includes, add_init_functions ]: + debug('deps: project rules checking %s', f) + for t in tgt_list: f(t) + debug("deps: %s: %s" % (f, str(timer))) + + debug('deps: project rules stage1 completed') +
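+ # fail early if the same source file would be compiled twice into a single target + if not 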
check_duplicate_sources(bld, tgt_list): + Logs.error("Duplicate sources present - aborting") + sys.exit(1) + + debug("deps: check_duplicate_sources: %s" % str(timer)) + + if not bld.check_group_ordering(tgt_list): + Logs.error("Bad group ordering - aborting") + sys.exit(1) + + debug("deps: check_group_ordering: %s" % str(timer)) + + show_final_deps(bld, tgt_list) + + debug("deps: show_final_deps: %s" % str(timer)) + + debug('deps: project rules checking completed - %u targets checked', + len(tgt_list)) + + if not bld.is_install: + save_samba_deps(bld, tgt_list) + + debug("deps: save_samba_deps: %s" % str(timer)) + + Logs.info("Project rules pass") + + if bld.cmd == 'build': + Task.Task.keep_last_cmd = True + bld.add_post_fun(generate_clangdb) + + +def CHECK_PROJECT_RULES(bld): + '''enable checking of project targets for sanity''' + if bld.env.added_project_rules: + return + bld.env.added_project_rules = True + bld.add_pre_fun(check_project_rules) +Build.BuildContext.CHECK_PROJECT_RULES = CHECK_PROJECT_RULES + + diff --git a/buildtools/wafsamba/samba_dist.py b/buildtools/wafsamba/samba_dist.py new file mode 100644 index 0000000..0218cad --- /dev/null +++ b/buildtools/wafsamba/samba_dist.py @@ -0,0 +1,280 @@ +# customised version of 'waf dist' for Samba tools +# uses git ls-files to get file lists + +import os, sys, tarfile +from waflib import Utils, Scripting, Logs, Options +from waflib.Configure import conf +from samba_utils import get_string +from waflib import Context + +dist_dirs = None +dist_files = None +dist_blacklist = "" +dist_archive = None + +class Dist(Context.Context): + # TODO remove + cmd = 'dist' + fun = 'dist' + def execute(self): + Context.g_module.dist() + +class DistCheck(Scripting.DistCheck): + fun = 'distcheck' + cmd = 'distcheck' + def execute(self): + Options.options.distcheck_args = '' + if Context.g_module.distcheck is Scripting.distcheck: + # default + Context.g_module.distcheck(self) + else: + Context.g_module.distcheck() + Context.g_module.dist() + self.check() + def get_arch_name(self): + global dist_archive + return dist_archive + def make_distcheck_cmd(self, tmpdir): + waf = os.path.abspath(sys.argv[0]) + return [sys.executable, waf, 'configure', 'build', 'install', 'uninstall', '--destdir=' + tmpdir] + +def add_symlink(tar, fname, abspath, basedir): + '''handle symlinks to directories that may move during packaging''' + if not os.path.islink(abspath): + return False + tinfo = tar.gettarinfo(name=abspath, arcname=fname) + tgt = os.readlink(abspath) + + if dist_dirs: + # we need to find the target relative to the main directory + # this is here to cope with symlinks into the buildtools + # directory from within the standalone libraries in Samba. For example, + # a symlink to ../../buildtools/scripts/autogen-waf.sh needs + # to be rewritten as a symlink to buildtools/scripts/autogen-waf.sh + # when the tarball for talloc is built + + # the filename without the appname-version + rel_fname = '/'.join(fname.split('/')[1:]) + + # join this with the symlink target + tgt_full = os.path.join(os.path.dirname(rel_fname), tgt) + + # join with the base directory + tgt_base = os.path.normpath(os.path.join(basedir, tgt_full)) + + # see if this is inside one of our dist_dirs + for dir in dist_dirs.split(): + if dir.find(':') != -1: + destdir=dir.split(':')[1] + dir=dir.split(':')[0] + else: + destdir = '.' 
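+ # dist_dirs entries may use the "srcdir:destdir" form split above; + # e.g. an entry like "lib/talloc:." (illustrative) would package + # the contents of lib/talloc at the top level of the tarball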
+ if dir == basedir: + # internal links don't get rewritten + continue + if dir == tgt_base[0:len(dir)] and tgt_base[len(dir)] == '/': + new_tgt = destdir + tgt_base[len(dir):] + tinfo.linkname = new_tgt + break + + tinfo.uid = 0 + tinfo.gid = 0 + tinfo.uname = 'root' + tinfo.gname = 'root' + tar.addfile(tinfo) + return True + +def add_tarfile(tar, fname, abspath, basedir): + '''add a file to the tarball''' + if add_symlink(tar, fname, abspath, basedir): + return + try: + tinfo = tar.gettarinfo(name=abspath, arcname=fname) + except OSError: + Logs.error('Unable to find file %s - missing from git checkout?' % abspath) + sys.exit(1) + tinfo.uid = 0 + tinfo.gid = 0 + tinfo.uname = 'root' + tinfo.gname = 'root' + fh = open(abspath, "rb") + tar.addfile(tinfo, fileobj=fh) + fh.close() + + +def vcs_dir_contents(path): + """Return the versioned files under a path. + + :return: List of paths relative to path + """ + repo = path + while repo != "/": + if os.path.exists(os.path.join(repo, ".git")): + ls_files_cmd = [ 'git', 'ls-files', '--full-name', + os.path.relpath(path, repo) ] + cwd = None + env = dict(os.environ) + env["GIT_DIR"] = os.path.join(repo, ".git") + break + repo = os.path.dirname(repo) + if repo == "/": + raise Exception("unsupported or no vcs for %s" % path) + return get_string(Utils.cmd_output(ls_files_cmd, cwd=cwd, env=env)).split('\n') + + +def dist(appname='', version=''): + + def add_files_to_tarball(tar, srcdir, srcsubdir, dstdir, dstsubdir, blacklist, files): + if blacklist is None: + blacklist = [] + for f in files: + abspath = os.path.join(srcdir, f) + + if srcsubdir != '.': + f = f[len(srcsubdir)+1:] + + # Remove files in the blacklist + if f in blacklist: + continue + blacklisted = False + # Remove directories in the blacklist + for d in blacklist: + if f.startswith(d): + blacklisted = True + if blacklisted: + continue + if os.path.isdir(abspath) and not os.path.islink(abspath): + continue + if dstsubdir != '.': + f = dstsubdir + '/' + f + fname = dstdir + '/' + f + add_tarfile(tar, fname, abspath, srcsubdir) + + + def list_directory_files(path): + curdir = os.getcwd() + os.chdir(srcdir) + out_files = [] + for root, dirs, files in os.walk(path): + for f in files: + out_files.append(os.path.join(root, f)) + os.chdir(curdir) + return out_files + + + if not isinstance(appname, str) or not appname: + # this copes with a mismatch in the calling arguments for dist() + appname = Context.g_module.APPNAME + version = Context.g_module.VERSION + if not version: + version = Context.g_module.VERSION + + srcdir = os.path.normpath( + os.path.join(os.path.dirname(Context.g_module.root_path), + Context.g_module.top)) + + if not dist_dirs: + Logs.error('You must use samba_dist.DIST_DIRS() to set which directories to package') + sys.exit(1) + + dist_base = '%s-%s' % (appname, version) + + if Options.options.SIGN_RELEASE: + dist_name = '%s.tar' % (dist_base) + tar = tarfile.open(dist_name, 'w') + else: + dist_name = '%s.tar.gz' % (dist_base) + tar = tarfile.open(dist_name, 'w:gz') + + blacklist = dist_blacklist.split() + + for dir in dist_dirs.split(): + if dir.find(':') != -1: + destdir=dir.split(':')[1] + dir=dir.split(':')[0] + else: + destdir = '.' 
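+ # the file list for each directory comes from version control via + # vcs_dir_contents() below, so untracked files are never packaged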
+ absdir = os.path.join(srcdir, dir) + try: + files = vcs_dir_contents(absdir) + except Exception as e: + Logs.error('unable to get contents of %s: %s' % (absdir, e)) + sys.exit(1) + add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) + + if dist_files: + for file in dist_files.split(): + if file.find(':') != -1: + destfile = file.split(':')[1] + file = file.split(':')[0] + else: + destfile = file + + absfile = os.path.join(srcdir, file) + + if os.path.isdir(absfile) and not os.path.islink(absfile): + destdir = destfile + dir = file + files = list_directory_files(dir) + add_files_to_tarball(tar, srcdir, dir, dist_base, destdir, blacklist, files) + else: + fname = dist_base + '/' + destfile + add_tarfile(tar, fname, absfile, destfile) + + tar.close() + + if Options.options.SIGN_RELEASE: + import gzip + try: + os.unlink(dist_name + '.asc') + except OSError: + pass + + cmd = "gpg --detach-sign --armor " + dist_name + os.system(cmd) + uncompressed_tar = open(dist_name, 'rb') + compressed_tar = gzip.open(dist_name + '.gz', 'wb') + while 1: + buffer = uncompressed_tar.read(1048576) + if buffer: + compressed_tar.write(buffer) + else: + break + uncompressed_tar.close() + compressed_tar.close() + os.unlink(dist_name) + Logs.info('Created %s.gz %s.asc' % (dist_name, dist_name)) + dist_name = dist_name + '.gz' + else: + Logs.info('Created %s' % dist_name) + + # TODO use the ctx object instead + global dist_archive + dist_archive = dist_name + return dist_name + + +@conf +def DIST_DIRS(dirs): + '''set the directories to package, relative to top srcdir''' + global dist_dirs + if not dist_dirs: + dist_dirs = dirs + +@conf +def DIST_FILES(files, extend=False): + '''set additional files for packaging, relative to top srcdir''' + global dist_files + if not dist_files: + dist_files = files + elif extend: + dist_files = dist_files + " " + files + +@conf +def DIST_BLACKLIST(blacklist): + '''set the files to exclude from packaging, relative to top srcdir''' + global dist_blacklist + if not dist_blacklist: + dist_blacklist = blacklist + +Scripting.dist = dist diff --git a/buildtools/wafsamba/samba_git.py b/buildtools/wafsamba/samba_git.py new file mode 100644 index 0000000..fe540ec --- /dev/null +++ b/buildtools/wafsamba/samba_git.py @@ -0,0 +1,58 @@ +import os +import subprocess + +def find_git(env=None): + """Find the git binary.""" + if env is not None and 'GIT' in env: + return env.get_flat('GIT') + + # Get version from GIT + if os.path.exists("/usr/bin/git"): + # this is useful when doing make dist without configuring + return "/usr/bin/git" + + return None + + +def has_submodules(path): + """Check whether a source directory is git-versioned and has submodules. + + :param path: Path to Samba source directory + """ + return (os.path.isdir(os.path.join(path, ".git")) and + os.path.isfile(os.path.join(path, ".gitmodules"))) + + +def read_submodule_status(path, env=None): + """Check status of submodules. + + :param path: Path to git directory + :param env: Optional waf environment + :return: Yields tuples with submodule relpath and status + (one of: 'out-of-date', 'not-checked-out', 'up-to-date') + :raise RuntimeError: raised when parsing of 'git submodule status' output + fails. + """ + if not has_submodules(path): + # No point in running git. 
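
As an aside, a hedged sketch of how a caller might consume this generator (the checkout path is hypothetical):

stale = [rp for rp, status in read_submodule_status('/src/samba')
         if status in ('out-of-date', 'not-checked-out')]
if stale:
    raise RuntimeError('run "git submodule update --init" for: %s' % ', '.join(stale))
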
+ return + git = find_git(env) + if git is None: + return + p = subprocess.Popen([git, "submodule", "status"], stdout=subprocess.PIPE, + cwd=path) + (stdout, stderr) = p.communicate(None) + for l in stdout.splitlines(): + l = l.decode('utf-8') + l = l.rstrip() + status = l[0] + l = l[1:] + parts = l.split(" ") + if len(parts) > 2 and status in ("-", "+"): + yield (parts[1], "out-of-date") + elif len(parts) == 2 and status == "-": + yield (parts[1], "not-checked-out") + elif len(parts) > 2 and status == " ": + yield (parts[1], "up-to-date") + else: + raise RuntimeError("Unable to parse submodule status: %r, %r" % (status, parts)) diff --git a/buildtools/wafsamba/samba_headers.py b/buildtools/wafsamba/samba_headers.py new file mode 100644 index 0000000..37147a9 --- /dev/null +++ b/buildtools/wafsamba/samba_headers.py @@ -0,0 +1,181 @@ +# specialist handling of header files for Samba + +import os, re, sys, fnmatch +from waflib import Build, Logs, Utils, Errors +from samba_utils import TO_LIST + + +def header_install_path(header, header_path): + '''find the installation path for a header, given a header_path option''' + if not header_path: + return '' + if not isinstance(header_path, list): + return header_path + for (p1, dir) in header_path: + for p2 in TO_LIST(p1): + if fnmatch.fnmatch(header, p2): + return dir + # default to current path + return '' + + +re_header = re.compile(r'^\s*#\s*include[ \t]*"([^"]+)"', re.I | re.M) + +# a dictionary mapping source header paths to public header paths +header_map = {} + +def find_suggested_header(hpath): + '''find a suggested header path to use''' + base = os.path.basename(hpath) + ret = [] + for h in header_map: + if os.path.basename(h) == base: + ret.append('<%s>' % header_map[h]) + ret.append('"%s"' % h) + return ret + +def create_public_header(task): + '''create a public header from a private one, output within the build tree''' + src = task.inputs[0].abspath(task.env) + tgt = task.outputs[0].bldpath(task.env) + + if os.path.exists(tgt): + os.unlink(tgt) + + relsrc = os.path.relpath(src, task.env.TOPDIR) + + infile = open(src, mode='r') + outfile = open(tgt, mode='w') + linenumber = 0 + + search_paths = [ '', task.env.RELPATH ] + for i in task.env.EXTRA_INCLUDES: + if i.startswith('#'): + search_paths.append(i[1:]) + + for line in infile: + linenumber += 1 + + # allow some straight substitutions + if task.env.public_headers_replace and line.strip() in task.env.public_headers_replace: + outfile.write(task.env.public_headers_replace[line.strip()] + '\n') + continue + + # see if its an include line + m = re_header.match(line) + if m is None: + outfile.write(line) + continue + + # its an include, get the header path + hpath = m.group(1) + if hpath.startswith("bin/default/"): + hpath = hpath[12:] + + # some are always allowed + if task.env.public_headers_skip and hpath in task.env.public_headers_skip: + outfile.write(line) + continue + + # work out the header this refers to + found = False + for s in search_paths: + p = os.path.normpath(os.path.join(s, hpath)) + if p in header_map: + outfile.write("#include <%s>\n" % header_map[p]) + found = True + break + if found: + continue + + if task.env.public_headers_allow_broken: + Logs.warn("Broken public header include '%s' in '%s'" % (hpath, relsrc)) + outfile.write(line) + continue + + # try to be nice to the developer by suggesting an alternative + suggested = find_suggested_header(hpath) + outfile.close() + os.unlink(tgt) + sys.stderr.write("%s:%u:Error: unable to resolve public header %s (maybe try one 
of %s)\n" % ( + os.path.relpath(src, os.getcwd()), linenumber, hpath, suggested)) + raise Errors.WafError("Unable to resolve header path '%s' in public header '%s' in directory %s" % ( + hpath, relsrc, task.env.RELPATH)) + infile.close() + outfile.close() + + +def public_headers_simple(bld, public_headers, header_path=None, public_headers_install=True): + '''install some headers - simple version, no munging needed + ''' + if not public_headers_install: + return + for h in TO_LIST(public_headers): + inst_path = header_install_path(h, header_path) + if h.find(':') != -1: + s = h.split(":") + h_name = s[0] + inst_name = s[1] + else: + h_name = h + inst_name = os.path.basename(h) + bld.INSTALL_FILES('${INCLUDEDIR}', h_name, destname=inst_name) + + +def PUBLIC_HEADERS(bld, public_headers, header_path=None, public_headers_install=True): + '''install some headers + + header_path may either be a string that is added to the INCLUDEDIR, + or it can be a dictionary of wildcard patterns which map to destination + directories relative to INCLUDEDIR + ''' + bld.SET_BUILD_GROUP('final') + + if not bld.env.build_public_headers: + # in this case no header munging needed. Used for tdb, talloc etc + public_headers_simple(bld, public_headers, header_path=header_path, + public_headers_install=public_headers_install) + return + + # create the public header in the given path + # in the build tree + for h in TO_LIST(public_headers): + inst_path = header_install_path(h, header_path) + if h.find(':') != -1: + s = h.split(":") + h_name = s[0] + inst_name = s[1] + else: + h_name = h + inst_name = os.path.basename(h) + curdir = bld.path.abspath() + relpath1 = os.path.relpath(bld.srcnode.abspath(), curdir) + relpath2 = os.path.relpath(curdir, bld.srcnode.abspath()) + targetdir = os.path.normpath(os.path.join(relpath1, bld.env.build_public_headers, inst_path)) + if not os.path.exists(os.path.join(curdir, targetdir)): + raise Errors.WafError("missing source directory %s for public header %s" % (targetdir, inst_name)) + target = os.path.join(targetdir, inst_name) + + # the source path of the header, relative to the top of the source tree + src_path = os.path.normpath(os.path.join(relpath2, h_name)) + + # the install path of the header, relative to the public include directory + target_path = os.path.normpath(os.path.join(inst_path, inst_name)) + + header_map[src_path] = target_path + + t = bld.SAMBA_GENERATOR('HEADER_%s/%s/%s' % (relpath2, inst_path, inst_name), + group='headers', + rule=create_public_header, + source=h_name, + target=target) + t.env.RELPATH = relpath2 + t.env.TOPDIR = bld.srcnode.abspath() + if not bld.env.public_headers_list: + bld.env.public_headers_list = [] + bld.env.public_headers_list.append(os.path.join(inst_path, inst_name)) + if public_headers_install: + bld.INSTALL_FILES('${INCLUDEDIR}', + target, + destname=os.path.join(inst_path, inst_name), flat=True) +Build.BuildContext.PUBLIC_HEADERS = PUBLIC_HEADERS diff --git a/buildtools/wafsamba/samba_install.py b/buildtools/wafsamba/samba_install.py new file mode 100644 index 0000000..a43d103 --- /dev/null +++ b/buildtools/wafsamba/samba_install.py @@ -0,0 +1,236 @@ +########################### +# this handles the magic we need to do for installing +# with all the configure options that affect rpath and shared +# library use + +import os +from waflib import Utils, Errors +from waflib.TaskGen import feature, before, after +from samba_utils import LIB_PATH, MODE_755, install_rpath, build_rpath + +@feature('install_bin') +@after('apply_core') 
+@before('apply_link', 'apply_obj_vars') +def install_binary(self): + '''install a binary, taking account of the different rpath variants''' + bld = self.bld + + # get the ldflags we will use for install and build + install_ldflags = install_rpath(self) + build_ldflags = build_rpath(bld) + + if not self.bld.is_install: + # just need to set rpath if we are not installing + self.env.RPATH = build_ldflags + return + + # work out the install path, expanding variables + install_path = getattr(self, 'samba_inst_path', None) or '${BINDIR}' + install_path = bld.EXPAND_VARIABLES(install_path) + + orig_target = os.path.basename(self.target) + + if install_ldflags != build_ldflags: + # we will be creating a new target name, and using that for the + # install link. That stops us from overwriting the existing build + # target, which has different ldflags + self.target += '.inst' + + # setup the right rpath link flags for the install + self.env.RPATH = install_ldflags + + if not self.samba_install: + # this binary is marked not to be installed + return + + # tell waf to install the right binary + bld.install_as(os.path.join(install_path, orig_target), + self.path.find_or_declare(self.target), + chmod=MODE_755) + + + +@feature('install_lib') +@after('apply_core') +@before('apply_link', 'apply_obj_vars') +def install_library(self): + '''install a library, taking account of the different rpath variants''' + if getattr(self, 'done_install_library', False): + return + + bld = self.bld + + default_env = bld.all_envs['default'] + try: + install_ldflags = install_rpath(self) + build_ldflags = build_rpath(bld) + + if not self.bld.is_install or not getattr(self, 'samba_install', True): + # just need to set the build rpath if we are not installing + self.env.RPATH = build_ldflags + return + + # setup the install path, expanding variables + install_path = getattr(self, 'samba_inst_path', None) + if install_path is None: + if getattr(self, 'private_library', False): + install_path = '${PRIVATELIBDIR}' + else: + install_path = '${LIBDIR}' + install_path = bld.EXPAND_VARIABLES(install_path) + + target_name = self.target + + if install_ldflags != build_ldflags: + # we will be creating a new target name, and using that for the + # install link. That stops us from overwriting the existing build + # target, which has different ldflags + self.done_install_library = True + t = self.clone(self.env) + t.posted = False + t.target += '.inst' + t.name = self.name + '.inst' + self.env.RPATH = build_ldflags + else: + t = self + + t.env.RPATH = install_ldflags + + dev_link = None + + # in the following the names are: + # - inst_name is the name with .inst. 
in it, in the build + # directory + # - install_name is the name in the install directory + # - install_link is a symlink in the install directory, to install_name + + if getattr(self, 'samba_realname', None): + install_name = self.samba_realname + install_link = None + if getattr(self, 'soname', ''): + install_link = self.soname + if getattr(self, 'samba_type', None) == 'PYTHON': + inst_name = bld.make_libname(t.target, nolibprefix=True, python=True) + else: + inst_name = bld.make_libname(t.target) + elif self.vnum: + vnum_base = self.vnum.split('.')[0] + install_name = bld.make_libname(target_name, version=self.vnum) + install_link = bld.make_libname(target_name, version=vnum_base) + inst_name = bld.make_libname(t.target) + if not self.private_library or not t.env.SONAME_ST: + # only generate the dev link for non-bundled libs + dev_link = bld.make_libname(target_name) + elif getattr(self, 'soname', ''): + install_name = bld.make_libname(target_name) + install_link = self.soname + inst_name = bld.make_libname(t.target) + else: + install_name = bld.make_libname(target_name) + install_link = None + inst_name = bld.make_libname(t.target) + + if t.env.SONAME_ST: + # ensure we get the right names in the library + if install_link: + t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_link) + else: + t.env.append_value('LINKFLAGS', t.env.SONAME_ST % install_name) + t.env.SONAME_ST = '' + + # tell waf to install the library + bld.install_as(os.path.join(install_path, install_name), + self.path.find_or_declare(inst_name), + chmod=MODE_755) + + if install_link and install_link != install_name: + # and the symlink if needed + bld.symlink_as(os.path.join(install_path, install_link), os.path.basename(install_name)) + if dev_link: + bld.symlink_as(os.path.join(install_path, dev_link), os.path.basename(install_name)) + finally: + bld.all_envs['default'] = default_env + + +@feature('cshlib') +@after('apply_implib') +@before('apply_vnum') +def apply_soname(self): + '''install a library, taking account of the different rpath variants''' + + if self.env.SONAME_ST and getattr(self, 'soname', ''): + self.env.append_value('LINKFLAGS', self.env.SONAME_ST % self.soname) + self.env.SONAME_ST = '' + +@feature('cshlib') +@after('apply_implib') +@before('apply_vnum') +def apply_vscript(self): + '''add version-script arguments to library build''' + + if self.env.HAVE_LD_VERSION_SCRIPT and getattr(self, 'version_script', ''): + self.env.append_value('LINKFLAGS', "-Wl,--version-script=%s" % + self.version_script) + self.version_script = None + + +############################## +# handle the creation of links for libraries and binaries in the build tree + +@feature('symlink_lib') +@after('apply_link') +def symlink_lib(self): + '''symlink a shared lib''' + + if self.target.endswith('.inst'): + return + + blddir = os.path.dirname(self.bld.srcnode.abspath(self.bld.env)) + libpath = self.link_task.outputs[0].abspath(self.env) + + # calculate the link target and put it in the environment + soext="" + vnum = getattr(self, 'vnum', None) + if vnum is not None: + soext = '.' 
+ vnum.split('.')[0] + + link_target = getattr(self, 'link_name', '') + if link_target == '': + basename = os.path.basename(self.bld.make_libname(self.target, version=soext)) + if getattr(self, "private_library", False): + link_target = '%s/private/%s' % (LIB_PATH, basename) + else: + link_target = '%s/%s' % (LIB_PATH, basename) + + link_target = os.path.join(blddir, link_target) + + if os.path.lexists(link_target): + if os.path.islink(link_target) and os.readlink(link_target) == libpath: + return + os.unlink(link_target) + + link_container = os.path.dirname(link_target) + if not os.path.isdir(link_container): + os.makedirs(link_container) + + os.symlink(libpath, link_target) + + +@feature('symlink_bin') +@after('apply_link') +def symlink_bin(self): + '''symlink a binary into the build directory''' + + if self.target.endswith('.inst'): + return + + if not self.link_task.outputs or not self.link_task.outputs[0]: + raise Errors.WafError('no outputs found for %s in symlink_bin' % self.name) + binpath = self.link_task.outputs[0].abspath(self.env) + bldpath = os.path.join(self.bld.env.BUILD_DIRECTORY, self.link_task.outputs[0].name) + + if os.path.lexists(bldpath): + if os.path.islink(bldpath) and os.readlink(bldpath) == binpath: + return + os.unlink(bldpath) + os.symlink(binpath, bldpath) diff --git a/buildtools/wafsamba/samba_patterns.py b/buildtools/wafsamba/samba_patterns.py new file mode 100644 index 0000000..4129681 --- /dev/null +++ b/buildtools/wafsamba/samba_patterns.py @@ -0,0 +1,234 @@ +# a waf tool to add extension based build patterns for Samba + +import sys +from waflib import Build +from wafsamba import samba_version_file + +def write_version_header(task): + '''print version.h contents''' + src = task.inputs[0].srcpath(task.env) + + version = samba_version_file(src, task.env.srcdir, env=task.env, is_install=task.generator.bld.is_install) + string = str(version) + + task.outputs[0].write(string) + return 0 + + +def SAMBA_MKVERSION(bld, target, source='VERSION buildtools/wafsamba/samba_version.py'): + '''generate the version.h header for Samba''' + + # We only force waf to re-generate this file if we are installing, + # because only then is information not included in the deps (the + # git revision) included in the version. + t = bld.SAMBA_GENERATOR('VERSION', + rule=write_version_header, + group='setup', + source=source, + target=target, + always=bld.is_install) +Build.BuildContext.SAMBA_MKVERSION = SAMBA_MKVERSION + + +def write_build_options_header(fp): + '''write preamble for build_options.c''' + fp.write("/*\n" + " Unix SMB/CIFS implementation.\n" + " Build Options for Samba Suite\n" + " Copyright (C) Vance Lankhaar <vlankhaar@linux.ca> 2003\n" + " Copyright (C) Andrew Bartlett <abartlet@samba.org> 2001\n" + "\n" + " This program is free software; you can redistribute it and/or modify\n" + " it under the terms of the GNU General Public License as published by\n" + " the Free Software Foundation; either version 3 of the License, or\n" + " (at your option) any later version.\n" + "\n" + " This program is distributed in the hope that it will be useful,\n" + " but WITHOUT ANY WARRANTY; without even the implied warranty of\n" + " MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n" + " GNU General Public License for more details.\n" + "\n" + " You should have received a copy of the GNU General Public License\n" + " along with this program; if not, see <http://www.gnu.org/licenses/>.\n" + "*/\n" + "\n" + "#include \"includes.h\"\n" + "#include \"dynconfig/dynconfig.h\"\n" + "#include \"lib/cluster_support.h\"\n" + + "\n" + "static int output(bool screen, const char *format, ...) PRINTF_ATTRIBUTE(2,3);\n" + "void build_options(bool screen);\n" + "\n" + "\n" + "/****************************************************************************\n" + "helper function for build_options\n" + "****************************************************************************/\n" + "static int output(bool screen, const char *format, ...)\n" + "{\n" + " char *ptr = NULL;\n" + " int ret = 0;\n" + " va_list ap;\n" + " \n" + " va_start(ap, format);\n" + " ret = vasprintf(&ptr,format,ap);\n" + " va_end(ap);\n" + "\n" + " if (screen) {\n" + " d_printf(\"%s\", ptr ? ptr : \"\");\n" + " } else {\n" + " DEBUG(4,(\"%s\", ptr ? ptr : \"\"));\n" + " }\n" + " \n" + " SAFE_FREE(ptr);\n" + " return ret;\n" + "}\n" + "\n" + "/****************************************************************************\n" + "options set at build time for the samba suite\n" + "****************************************************************************/\n" + "void build_options(bool screen)\n" + "{\n" + " if ((DEBUGLEVEL < 4) && (!screen)) {\n" + " return;\n" + " }\n" + "\n" + "\n" + " /* Output various paths to files and directories */\n" + " output(screen,\"\\nPaths:\\n\"\n" + " \" SBINDIR: %s\\n\"\n" + " \" BINDIR: %s\\n\"\n" + " \" CONFIGFILE: %s\\n\"\n" + " \" LOGFILEBASE: %s\\n\"\n" + " \" LMHOSTSFILE: %s\\n\"\n" + " \" LIBDIR: %s\\n\"\n" + " \" DATADIR: %s\\n\"\n" + " \" SAMBA_DATADIR: %s\\n\"\n" + " \" MODULESDIR: %s\\n\"\n" + " \" SHLIBEXT: %s\\n\"\n" + " \" LOCKDIR: %s\\n\"\n" + " \" STATEDIR: %s\\n\"\n" + " \" CACHEDIR: %s\\n\"\n" + " \" PIDDIR: %s\\n\"\n" + " \" SMB_PASSWD_FILE: %s\\n\"\n" + " \" PRIVATE_DIR: %s\\n\"\n" + " \" BINDDNS_DIR: %s\\n\",\n" + " get_dyn_SBINDIR(),\n" + " get_dyn_BINDIR(),\n" + " get_dyn_CONFIGFILE(),\n" + " get_dyn_LOGFILEBASE(),\n" + " get_dyn_LMHOSTSFILE(),\n" + " get_dyn_LIBDIR(),\n" + " get_dyn_DATADIR(),\n" + " get_dyn_SAMBA_DATADIR(),\n" + " get_dyn_MODULESDIR(),\n" + " get_dyn_SHLIBEXT(),\n" + " get_dyn_LOCKDIR(),\n" + " get_dyn_STATEDIR(),\n" + " get_dyn_CACHEDIR(),\n" + " get_dyn_PIDDIR(),\n" + " get_dyn_SMB_PASSWD_FILE(),\n" + " get_dyn_PRIVATE_DIR(),\n" + " get_dyn_BINDDNS_DIR());\n" + "\n") + +def write_build_options_footer(fp): + fp.write(" /* Output the sizes of the various cluster features */\n" + " output(screen, \"\\n%s\", cluster_support_features());\n" + "\n" + " /* Output the sizes of the various types */\n" + " output(screen, \"\\nType sizes:\\n\"\n" + " \" sizeof(char): %lu\\n\"\n" + " \" sizeof(int): %lu\\n\"\n" + " \" sizeof(long): %lu\\n\"\n" + " \" sizeof(long long): %lu\\n\"\n" + " \" sizeof(uint8_t): %lu\\n\"\n" + " \" sizeof(uint16_t): %lu\\n\"\n" + " \" sizeof(uint32_t): %lu\\n\"\n" + " \" sizeof(short): %lu\\n\"\n" + " \" sizeof(void*): %lu\\n\"\n" + " \" sizeof(size_t): %lu\\n\"\n" + " \" sizeof(off_t): %lu\\n\"\n" + " \" sizeof(ino_t): %lu\\n\"\n" + " \" sizeof(dev_t): %lu\\n\",\n" + " (unsigned long)sizeof(char),\n" + " (unsigned long)sizeof(int),\n" + " (unsigned long)sizeof(long),\n" + " (unsigned long)sizeof(long long),\n" + " (unsigned long)sizeof(uint8_t),\n" + " (unsigned long)sizeof(uint16_t),\n" + " (unsigned 
long)sizeof(uint32_t),\n" + " (unsigned long)sizeof(short),\n" + " (unsigned long)sizeof(void*),\n" + " (unsigned long)sizeof(size_t),\n" + " (unsigned long)sizeof(off_t),\n" + " (unsigned long)sizeof(ino_t),\n" + " (unsigned long)sizeof(dev_t));\n" + "\n" + " output(screen, \"\\nBuiltin modules:\\n\"\n" + " \" %s\\n\", STRING_STATIC_MODULES);\n" + "}\n") + +def write_build_options_section(fp, keys, section): + fp.write("\n\t/* Show %s */\n" % section) + fp.write(" output(screen, \"\\n%s:\\n\");\n\n" % section) + + for k in sorted(keys): + fp.write("#ifdef %s\n" % k) + fp.write(" output(screen, \" %s\\n\");\n" % k) + fp.write("#endif\n") + fp.write("\n") + +def write_build_options(task): + tbl = task.env + keys_option_with = [] + keys_option_utmp = [] + keys_option_have = [] + keys_header_sys = [] + keys_header_other = [] + keys_misc = [] + if sys.hexversion>0x300000f: + trans_table = bytes.maketrans(b'.-()', b'____') + else: + import string + trans_table = string.maketrans('.-()', '____') + + for key in tbl: + if key.startswith("HAVE_UT_UT_") or key.find("UTMP") >= 0: + keys_option_utmp.append(key) + elif key.startswith("WITH_"): + keys_option_with.append(key) + elif key.startswith("HAVE_SYS_"): + keys_header_sys.append(key) + elif key.startswith("HAVE_"): + if key.endswith("_H"): + keys_header_other.append(key) + else: + keys_option_have.append(key) + elif key.startswith("static_init_"): + l = key.split("(") + keys_misc.append(l[0]) + else: + keys_misc.append(key.translate(trans_table)) + + tgt = task.outputs[0].bldpath(task.env) + f = open(tgt, 'w') + write_build_options_header(f) + write_build_options_section(f, keys_header_sys, "System Headers") + write_build_options_section(f, keys_header_other, "Headers") + write_build_options_section(f, keys_option_utmp, "UTMP Options") + write_build_options_section(f, keys_option_have, "HAVE_* Defines") + write_build_options_section(f, keys_option_with, "--with Options") + write_build_options_section(f, keys_misc, "Build Options") + write_build_options_footer(f) + f.close() + return 0 + + +def SAMBA_BLDOPTIONS(bld, target): + '''generate the bld_options.c for Samba''' + t = bld.SAMBA_GENERATOR(target, + rule=write_build_options, + dep_vars=['defines'], + target=target) +Build.BuildContext.SAMBA_BLDOPTIONS = SAMBA_BLDOPTIONS diff --git a/buildtools/wafsamba/samba_perl.py b/buildtools/wafsamba/samba_perl.py new file mode 100644 index 0000000..2139d14 --- /dev/null +++ b/buildtools/wafsamba/samba_perl.py @@ -0,0 +1,58 @@ +from waflib import Utils +from waflib.Configure import conf +from samba_utils import get_string +done = {} + +@conf +def SAMBA_CHECK_PERL(conf, mandatory=True, version=(5,0,0)): + if "done" in done: + return + done["done"] = True + conf.find_program('perl', var='PERL', mandatory=mandatory) + conf.load('perl') + path_perl = conf.find_program('perl') + conf.env.PERL_SPECIFIED = (conf.env.PERL != path_perl) + conf.check_perl_version(version) + + def read_perl_config_var(cmd): + output = Utils.cmd_output([conf.env.get_flat('PERL'), '-MConfig', '-e', cmd]) + if not isinstance(output, str): + output = get_string(output) + return Utils.to_list(output) + + def check_perl_config_var(var): + conf.start_msg("Checking for perl $Config{%s}:" % var) + try: + v = read_perl_config_var('print $Config{%s}' % var)[0] + conf.end_msg("'%s'" % (v), 'GREEN') + return v + except IndexError: + conf.end_msg(False, 'YELLOW') + return None + + vendor_prefix = check_perl_config_var('vendorprefix') + + perl_arch_install_dir = None + if vendor_prefix == 
conf.env.PREFIX: + perl_arch_install_dir = check_perl_config_var('vendorarch') + if perl_arch_install_dir is None: + perl_arch_install_dir = "${LIBDIR}/perl5" + conf.start_msg("PERL_ARCH_INSTALL_DIR: ") + conf.end_msg("'%s'" % (perl_arch_install_dir), 'GREEN') + conf.env.PERL_ARCH_INSTALL_DIR = perl_arch_install_dir + + perl_lib_install_dir = None + if vendor_prefix == conf.env.PREFIX: + perl_lib_install_dir = check_perl_config_var('vendorlib') + if perl_lib_install_dir is None: + perl_lib_install_dir = "${DATADIR}/perl5" + conf.start_msg("PERL_LIB_INSTALL_DIR: ") + conf.end_msg("'%s'" % (perl_lib_install_dir), 'GREEN') + conf.env.PERL_LIB_INSTALL_DIR = perl_lib_install_dir + + perl_inc = read_perl_config_var('print "@INC"') + if '.' in perl_inc: + perl_inc.remove('.') + conf.start_msg("PERL_INC: ") + conf.end_msg("%s" % (perl_inc), 'GREEN') + conf.env.PERL_INC = perl_inc diff --git a/buildtools/wafsamba/samba_pidl.py b/buildtools/wafsamba/samba_pidl.py new file mode 100644 index 0000000..72997c8 --- /dev/null +++ b/buildtools/wafsamba/samba_pidl.py @@ -0,0 +1,175 @@ +# waf build tool for building IDL files with pidl + +import os +from waflib import Build, Utils +from waflib.TaskGen import feature, before +from samba_utils import SET_TARGET_TYPE, TO_LIST, LOCAL_CACHE + +def SAMBA_PIDL(bld, pname, source, + options='', + output_dir='.', + generate_tables=True): + '''Build a IDL file using pidl. + This will produce up to 17 output files depending on the options used''' + + bname = source[0:-4] # strip off the .idl suffix + bname = os.path.basename(bname) + name = "%s_%s" % (pname, bname.upper()) + + if not SET_TARGET_TYPE(bld, name, 'PIDL'): + return + + bld.SET_BUILD_GROUP('build_source') + + # the output files depend on the options used. Use this dictionary + # to map between the options and the resulting file names + options_map = { '--header' : '%s.h', + '--ndr-parser' : 'ndr_%s.c ndr_%s.h', + '--samba3-ndr-server' : 'srv_%s.c srv_%s.h', + '--samba3-ndr-client' : 'cli_%s.c cli_%s.h', + '--server' : 'ndr_%s_s.c', + '--server-compat' : 'ndr_%s_scompat.c ndr_%s_scompat.h', + '--client' : 'ndr_%s_c.c ndr_%s_c.h', + '--python' : 'py_%s.c', + '--tdr-parser' : 'tdr_%s.c tdr_%s.h', + '--dcom-proxy' : '%s_p.c', + '--com-header' : 'com_%s.h' + } + + table_header_idx = None + out_files = [] + options_list = TO_LIST(options) + + for o in options_list: + if o in options_map: + ofiles = TO_LIST(options_map[o]) + for f in ofiles: + out_files.append(os.path.join(output_dir, f % bname)) + if f == 'ndr_%s.h': + # remember this one for the tables generation + table_header_idx = len(out_files) - 1 + + # depend on the full pidl sources + source = TO_LIST(source) + try: + pidl_src_nodes = bld.pidl_files_cache + except AttributeError: + bld.pidl_files_cache = bld.srcnode.ant_glob('pidl/lib/Parse/**/*.pm', flat=False) + bld.pidl_files_cache.extend(bld.srcnode.ant_glob('pidl', flat=False)) + pidl_src_nodes = bld.pidl_files_cache + + # the cd .. 
is needed because pidl currently is sensitive to the directory it is run in + cpp = "" + cc = "" + if bld.CONFIG_SET("CPP") and bld.CONFIG_GET("CPP") != "": + if isinstance(bld.CONFIG_GET("CPP"), list): + cpp = 'CPP="%s"' % " ".join(bld.CONFIG_GET("CPP")) + else: + cpp = 'CPP="%s"' % bld.CONFIG_GET("CPP") + + if cpp == "CPP=xlc_r": + cpp = "" + + if bld.env['PIDL_DEVELOPER_MODE']: + pidl_dev = 'PIDL_DEVELOPER=1 ' + else: + pidl_dev = '' + + if bld.CONFIG_SET("CC"): + if isinstance(bld.CONFIG_GET("CC"), list): + cc = 'CC="%s"' % " ".join(bld.CONFIG_GET("CC")) + else: + cc = 'CC="%s"' % bld.CONFIG_GET("CC") + + t = bld(rule='cd ${PIDL_LAUNCH_DIR} && %s%s %s ${PERL} ${PIDL} --quiet ${OPTIONS} --outputdir ${OUTPUTDIR} -- "${IDLSRC}"' % (pidl_dev, cpp, cc), + ext_out = '.c', + before = 'c', + update_outputs = True, + shell = True, + source = source, + target = out_files, + name = name, + samba_type = 'PIDL') + + + t.env.PIDL_LAUNCH_DIR = bld.srcnode.path_from(bld.bldnode) + pnode = bld.srcnode.find_resource('pidl/pidl') + t.env.PIDL = pnode.path_from(bld.srcnode) + t.env.OPTIONS = TO_LIST(options) + snode = t.path.find_resource(source[0]) + t.env.IDLSRC = snode.path_from(bld.srcnode) + t.env.OUTPUTDIR = bld.bldnode.path_from(bld.srcnode) + '/' + bld.path.find_dir(output_dir).path_from(bld.srcnode) + + bld.add_manual_dependency(snode, pidl_src_nodes) + + if generate_tables and table_header_idx is not None: + pidl_headers = LOCAL_CACHE(bld, 'PIDL_HEADERS') + pidl_headers[name] = [bld.path.find_or_declare(out_files[table_header_idx])] + + t.more_includes = '#' + bld.path.path_from(bld.srcnode) +Build.BuildContext.SAMBA_PIDL = SAMBA_PIDL + +def SAMBA_PIDL_LIST(bld, name, source, + options='', + output_dir='.', + generate_tables=True, + generate_fuzzers=True): + '''A wrapper for building a set of IDL files''' + for p in TO_LIST(source): + bld.SAMBA_PIDL(name, p, options=options, output_dir=output_dir, generate_tables=generate_tables) + + # Some IDL files don't exactly match between name and + # "interface" so we need a way to skip those, while other IDL + # files have the table generation skipped entirely, on which + # the fuzzers rely + if generate_tables and generate_fuzzers: + interface = p[0:-4] # strip off the .idl suffix + bld.SAMBA_NDR_FUZZ(interface, + auto_deps=True, + fuzz_type="TYPE_STRUCT") + + # Only generate the TYPE_STRUCT fuzzer if this isn't + # really DCE/RPC + if '--client' in options: + bld.SAMBA_NDR_FUZZ(interface, + auto_deps=True, + fuzz_type="TYPE_IN") + bld.SAMBA_NDR_FUZZ(interface, + auto_deps=True, + fuzz_type="TYPE_OUT") +Build.BuildContext.SAMBA_PIDL_LIST = SAMBA_PIDL_LIST + + +################################################################# +# the rule for generating the NDR tables +@feature('collect') +@before('exec_rule') +def collect(self): + pidl_headers = LOCAL_CACHE(self.bld, 'PIDL_HEADERS') + # The first source is tables.pl itself + self.source = Utils.to_list(self.source) + for (name, hd) in pidl_headers.items(): + y = self.bld.get_tgen_by_name(name) + self.bld.ASSERT(y is not None, 'Failed to find PIDL header %s' % name) + y.post() + for node in hd: + self.bld.ASSERT(node is not None, 'Got None as build node generating PIDL table for %s' % name) + self.source.append(node) + + +def SAMBA_PIDL_TABLES(bld, name, target): + '''generate the pidl NDR tables file''' + bld.SET_BUILD_GROUP('main') + t = bld( + features = 'collect', + rule = '${PERL} ${SRC} > ${TGT}', + ext_out = '.c', + before = 'c', + update_outputs = True, + shell = True, + source = 
'../../librpc/tables.pl', + target = target, + name = name) + t.env.LIBRPC = os.path.join(bld.srcnode.abspath(), 'librpc') +Build.BuildContext.SAMBA_PIDL_TABLES = SAMBA_PIDL_TABLES + diff --git a/buildtools/wafsamba/samba_python.py b/buildtools/wafsamba/samba_python.py new file mode 100644 index 0000000..12a94c8 --- /dev/null +++ b/buildtools/wafsamba/samba_python.py @@ -0,0 +1,157 @@ +# waf build tool for building IDL files with pidl + +import os, sys +from waflib import Build, Logs, Utils, Configure, Errors +from waflib.Configure import conf + +@conf +def SAMBA_CHECK_PYTHON(conf, version=(3,6,0)): + + # enable tool to build python extensions + if conf.env.HAVE_PYTHON_H: + conf.check_python_version(version) + return + + interpreters = [] + + conf.find_program('python3', var='PYTHON', + mandatory=not conf.env.disable_python) + conf.load('python') + path_python = conf.find_program('python3') + + conf.env.PYTHON_SPECIFIED = (conf.env.PYTHON != path_python) + conf.check_python_version(version) + + interpreters.append(conf.env['PYTHON']) + conf.env.python_interpreters = interpreters + + +@conf +def SAMBA_CHECK_PYTHON_HEADERS(conf): + if conf.env.disable_python: + + conf.msg("python headers", "Check disabled due to --disable-python") + # we don't want PYTHONDIR in config.h, as otherwise changing + # --prefix causes a complete rebuild + conf.env.DEFINES = [x for x in conf.env.DEFINES + if not x.startswith('PYTHONDIR=') + and not x.startswith('PYTHONARCHDIR=')] + + return + + if conf.env["python_headers_checked"] == []: + _check_python_headers(conf) + conf.env["python_headers_checked"] = "yes" + + else: + conf.msg("python headers", "using cache") + + # we don't want PYTHONDIR in config.h, as otherwise changing + # --prefix causes a complete rebuild + conf.env.DEFINES = [x for x in conf.env.DEFINES + if not x.startswith('PYTHONDIR=') + and not x.startswith('PYTHONARCHDIR=')] + +def _check_python_headers(conf): + conf.check_python_headers() + + abi_pattern = os.path.splitext(conf.env['pyext_PATTERN'])[0] + conf.env['PYTHON_SO_ABI_FLAG'] = abi_pattern % '' + conf.env['PYTHON_LIBNAME_SO_ABI_FLAG'] = ( + conf.env['PYTHON_SO_ABI_FLAG'].replace('_', '-')) + + for lib in conf.env['LINKFLAGS_PYEMBED']: + if lib.startswith('-L'): + conf.env.append_unique('LIBPATH_PYEMBED', lib[2:]) # strip '-L' + conf.env['LINKFLAGS_PYEMBED'].remove(lib) + + # same as in waf 1.5, keep only '-fno-strict-aliasing' + # and ignore defines such as NDEBUG _FORTIFY_SOURCE=2 + conf.env.DEFINES_PYEXT = [] + conf.env.CFLAGS_PYEXT = ['-fno-strict-aliasing'] + + return + +def PYTHON_BUILD_IS_ENABLED(self): + return self.CONFIG_SET('HAVE_PYTHON_H') + +Build.BuildContext.PYTHON_BUILD_IS_ENABLED = PYTHON_BUILD_IS_ENABLED + + +def SAMBA_PYTHON(bld, name, + source='', + deps='', + public_deps='', + realname=None, + cflags='', + cflags_end=None, + includes='', + init_function_sentinel=None, + local_include=True, + vars=None, + install=True, + enabled=True): + '''build a python extension for Samba''' + + # force-disable when we can't build python modules, so + # every single call doesn't need to pass this in. 
+ if not bld.PYTHON_BUILD_IS_ENABLED(): + enabled = False + + # Save time, no need to build python bindings when fuzzing + if bld.env.enable_fuzzing: + enabled = False + + # when we support static python modules we'll need to gather + # the list from all the SAMBA_PYTHON() targets + if init_function_sentinel is not None: + cflags += ' -DSTATIC_LIBPYTHON_MODULES=%s' % init_function_sentinel + + # From https://docs.python.org/2/c-api/arg.html: + # Starting with Python 2.5 the type of the length argument to + # PyArg_ParseTuple(), PyArg_ParseTupleAndKeywords() and PyArg_Parse() + # can be controlled by defining the macro PY_SSIZE_T_CLEAN before + # including Python.h. If the macro is defined, length is a Py_ssize_t + # rather than an int. + + # Because <Python.h> if often included before includes.h/config.h + # This must be in the -D compiler options + cflags += ' -DPY_SSIZE_T_CLEAN=1' + + source = bld.EXPAND_VARIABLES(source, vars=vars) + + if realname is not None: + link_name = 'python/%s' % realname + else: + link_name = None + + bld.SAMBA_LIBRARY(name, + source=source, + deps=deps, + public_deps=public_deps, + includes=includes, + cflags=cflags, + cflags_end=cflags_end, + local_include=local_include, + vars=vars, + realname=realname, + link_name=link_name, + pyext=True, + target_type='PYTHON', + install_path='${PYTHONARCHDIR}', + allow_undefined_symbols=True, + install=install, + enabled=enabled) + +Build.BuildContext.SAMBA_PYTHON = SAMBA_PYTHON + + +def pyembed_libname(bld, name): + if bld.env['PYTHON_SO_ABI_FLAG']: + return name + bld.env['PYTHON_SO_ABI_FLAG'] + else: + return name + +Build.BuildContext.pyembed_libname = pyembed_libname + + diff --git a/buildtools/wafsamba/samba_third_party.py b/buildtools/wafsamba/samba_third_party.py new file mode 100644 index 0000000..5289848 --- /dev/null +++ b/buildtools/wafsamba/samba_third_party.py @@ -0,0 +1,48 @@ +# functions to support third party libraries + +import os +from waflib import Utils, Build, Context +from waflib.Configure import conf + +@conf +def CHECK_FOR_THIRD_PARTY(conf): + return os.path.exists(os.path.join(Context.g_module.top, 'third_party')) + +Build.BuildContext.CHECK_FOR_THIRD_PARTY = CHECK_FOR_THIRD_PARTY + +@conf +def CHECK_POPT(conf): + return conf.CHECK_BUNDLED_SYSTEM('popt', checkfunctions='poptGetContext', headers='popt.h') + +Build.BuildContext.CHECK_POPT = CHECK_POPT + +@conf +def CHECK_CMOCKA(conf): + return conf.CHECK_BUNDLED_SYSTEM_PKG('cmocka', minversion='1.1.3') + +Build.BuildContext.CHECK_CMOCKA = CHECK_CMOCKA + +@conf +def CHECK_SOCKET_WRAPPER(conf): + return conf.CHECK_BUNDLED_SYSTEM_PKG('socket_wrapper', minversion='1.4.2') +Build.BuildContext.CHECK_SOCKET_WRAPPER = CHECK_SOCKET_WRAPPER + +@conf +def CHECK_NSS_WRAPPER(conf): + return conf.CHECK_BUNDLED_SYSTEM_PKG('nss_wrapper', minversion='1.1.15') +Build.BuildContext.CHECK_NSS_WRAPPER = CHECK_NSS_WRAPPER + +@conf +def CHECK_RESOLV_WRAPPER(conf): + return conf.CHECK_BUNDLED_SYSTEM_PKG('resolv_wrapper', minversion='1.1.8') +Build.BuildContext.CHECK_RESOLV_WRAPPER = CHECK_RESOLV_WRAPPER + +@conf +def CHECK_UID_WRAPPER(conf): + return conf.CHECK_BUNDLED_SYSTEM_PKG('uid_wrapper', minversion='1.3.0') +Build.BuildContext.CHECK_UID_WRAPPER = CHECK_UID_WRAPPER + +@conf +def CHECK_PAM_WRAPPER(conf): + return conf.CHECK_BUNDLED_SYSTEM_PKG('pam_wrapper', minversion='1.1.4') +Build.BuildContext.CHECK_PAM_WRAPPER = CHECK_PAM_WRAPPER diff --git a/buildtools/wafsamba/samba_utils.py b/buildtools/wafsamba/samba_utils.py new file mode 100644 index 0000000..f287e85 --- 
/dev/null +++ b/buildtools/wafsamba/samba_utils.py @@ -0,0 +1,754 @@ +# a waf tool to add autoconf-like macros to the configure section +# and for SAMBA_ macros for building libraries, binaries etc + +import errno +import os, sys, re, fnmatch, shlex, inspect +from optparse import SUPPRESS_HELP +from waflib import Build, Options, Utils, Task, Logs, Configure, Errors, Context +from waflib import Scripting +from waflib.TaskGen import feature, before, after +from waflib.Configure import ConfigurationContext +from waflib.Logs import debug +from waflib import ConfigSet +from waflib.Build import CACHE_SUFFIX + +# TODO: make this a --option +LIB_PATH="shared" + + +PY3 = sys.version_info[0] == 3 + +if PY3: + + # helper function to get a string from a variable that maybe 'str' or + # 'bytes' if 'bytes' then it is decoded using 'utf8'. If 'str' is passed + # it is returned unchanged + # Using this function is PY2/PY3 code should ensure in most cases + # the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly + # decodes the variable (see PY2 implementation of this function below) + def get_string(bytesorstring): + tmp = bytesorstring + if isinstance(bytesorstring, bytes): + tmp = bytesorstring.decode('utf8') + elif not isinstance(bytesorstring, str): + raise ValueError('Expected byte of string for %s:%s' % (type(bytesorstring), bytesorstring)) + return tmp + +else: + + # Helper function to return string. + # if 'str' or 'unicode' passed in they are returned unchanged + # otherwise an exception is generated + # Using this function is PY2/PY3 code should ensure in most cases + # the PY2 code runs unchanged in PY2 whereas the code in PY3 possibly + # decodes the variable (see PY3 implementation of this function above) + def get_string(bytesorstring): + tmp = bytesorstring + if not(isinstance(bytesorstring, str) or isinstance(bytesorstring, unicode)): + raise ValueError('Expected str or unicode for %s:%s' % (type(bytesorstring), bytesorstring)) + return tmp + +# sigh, python octal constants are a mess +MODE_644 = int('644', 8) +MODE_744 = int('744', 8) +MODE_755 = int('755', 8) +MODE_777 = int('777', 8) + +def conf(f): + # override in order to propagate the argument "mandatory" + def fun(*k, **kw): + mandatory = True + if 'mandatory' in kw: + mandatory = kw['mandatory'] + del kw['mandatory'] + + try: + return f(*k, **kw) + except Errors.ConfigurationError: + if mandatory: + raise + + fun.__name__ = f.__name__ + if 'mandatory' in inspect.getsource(f): + fun = f + + setattr(Configure.ConfigurationContext, f.__name__, fun) + setattr(Build.BuildContext, f.__name__, fun) + return f +Configure.conf = conf +Configure.conftest = conf + +@conf +def SET_TARGET_TYPE(ctx, target, value): + '''set the target type of a target''' + cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') + if target in cache and cache[target] != 'EMPTY': + Logs.error("ERROR: Target '%s' in directory %s re-defined as %s - was %s" % (target, ctx.path.abspath(), value, cache[target])) + sys.exit(1) + LOCAL_CACHE_SET(ctx, 'TARGET_TYPE', target, value) + debug("task_gen: Target '%s' created of type '%s' in %s" % (target, value, ctx.path.abspath())) + return True + + +def GET_TARGET_TYPE(ctx, target): + '''get target type from cache''' + cache = LOCAL_CACHE(ctx, 'TARGET_TYPE') + if not target in cache: + return None + return cache[target] + + +def ADD_LD_LIBRARY_PATH(path): + '''add something to LD_LIBRARY_PATH''' + if 'LD_LIBRARY_PATH' in os.environ: + oldpath = os.environ['LD_LIBRARY_PATH'] + else: + oldpath = '' + newpath = oldpath.split(':') 
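
The net effect of the helper being defined here, sketched with hypothetical paths (assumes the function body as completed just below):

import os

os.environ['LD_LIBRARY_PATH'] = '/usr/lib'
ADD_LD_LIBRARY_PATH('/src/samba/bin/shared')
ADD_LD_LIBRARY_PATH('/src/samba/bin/shared')   # duplicates are not re-added
assert os.environ['LD_LIBRARY_PATH'] == '/usr/lib:/src/samba/bin/shared'
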
+ if not path in newpath: + newpath.append(path) + os.environ['LD_LIBRARY_PATH'] = ':'.join(newpath) + + +def needs_private_lib(bld, target): + '''return True if a target links to a private library''' + for lib in getattr(target, "final_libs", []): + t = bld.get_tgen_by_name(lib) + if t and getattr(t, 'private_library', False): + return True + return False + + +def install_rpath(target): + '''the rpath value for installation''' + bld = target.bld + bld.env['RPATH'] = [] + ret = set() + if bld.env.RPATH_ON_INSTALL: + ret.add(bld.EXPAND_VARIABLES(bld.env.LIBDIR)) + if bld.env.RPATH_ON_INSTALL_PRIVATE and needs_private_lib(bld, target): + ret.add(bld.EXPAND_VARIABLES(bld.env.PRIVATELIBDIR)) + return list(ret) + + +def build_rpath(bld): + '''the rpath value for build''' + rpaths = [os.path.normpath('%s/%s' % (bld.env.BUILD_DIRECTORY, d)) for d in ("shared", "shared/private")] + bld.env['RPATH'] = [] + if bld.env.RPATH_ON_BUILD: + return rpaths + for rpath in rpaths: + ADD_LD_LIBRARY_PATH(rpath) + return [] + + +@conf +def LOCAL_CACHE(ctx, name): + '''return a named build cache dictionary, used to store + state inside other functions''' + if name in ctx.env: + return ctx.env[name] + ctx.env[name] = {} + return ctx.env[name] + + +@conf +def LOCAL_CACHE_SET(ctx, cachename, key, value): + '''set a value in a local cache''' + cache = LOCAL_CACHE(ctx, cachename) + cache[key] = value + + +@conf +def ASSERT(ctx, expression, msg): + '''a build assert call''' + if not expression: + raise Errors.WafError("ERROR: %s\n" % msg) +Build.BuildContext.ASSERT = ASSERT + + +def SUBDIR(bld, subdir, list): + '''create a list of files by pre-pending each with a subdir name''' + ret = '' + for l in TO_LIST(list): + ret = ret + os.path.normpath(os.path.join(subdir, l)) + ' ' + return ret +Build.BuildContext.SUBDIR = SUBDIR + + +def dict_concat(d1, d2): + '''concatenate two dictionaries d1 += d2''' + for t in d2: + if t not in d1: + d1[t] = d2[t] + +def ADD_COMMAND(opt, name, function): + '''add a new top level command to waf''' + Context.g_module.__dict__[name] = function + opt.name = function +Options.OptionsContext.ADD_COMMAND = ADD_COMMAND + + +@feature('c', 'cc', 'cshlib', 'cprogram') +@before('apply_core','exec_rule') +def process_depends_on(self): + '''The new depends_on attribute for build rules + allow us to specify a dependency on output from + a source generation rule''' + if getattr(self , 'depends_on', None): + lst = self.to_list(self.depends_on) + for x in lst: + y = self.bld.get_tgen_by_name(x) + self.bld.ASSERT(y is not None, "Failed to find dependency %s of %s" % (x, self.name)) + y.post() + if getattr(y, 'more_includes', None): + self.includes += " " + y.more_includes + + +def unique_list(seq): + '''return a uniquified list in the same order as the existing list''' + seen = {} + result = [] + for item in seq: + if item in seen: continue + seen[item] = True + result.append(item) + return result + + +def TO_LIST(str, delimiter=None): + '''Split a list, preserving quoted strings and existing lists''' + if str is None: + return [] + if isinstance(str, list): + # we need to return a new independent list... 
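
The behaviour of TO_LIST is worth pinning down with examples; these assertions follow the code paths of the function being defined here:

assert TO_LIST(None) == []
assert TO_LIST('talloc tdb ldb') == ['talloc', 'tdb', 'ldb']
assert TO_LIST(['a', 'b']) == ['a', 'b']                    # copied, not aliased
assert TO_LIST('"two words" one') == ['two words', 'one']   # quotes force shlex
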
+ return list(str) + if len(str) == 0: + return [] + lst = str.split(delimiter) + # the string may have had quotes in it, now we + # check if we did have quotes, and use the slower shlex + # if we need to + for e in lst: + if e[0] == '"': + return shlex.split(str) + return lst + + +def subst_vars_error(string, env): + '''substitute vars, throw an error if a variable is not defined''' + lst = re.split(r'(\$\{\w+\})', string) + out = [] + for v in lst: + if re.match(r'\$\{\w+\}', v): + vname = v[2:-1] + if not vname in env: + raise KeyError("Failed to find variable %s in %s in env %s <%s>" % (vname, string, env.__class__, str(env))) + v = env[vname] + if isinstance(v, list): + v = ' '.join(v) + out.append(v) + return ''.join(out) + + +@conf +def SUBST_ENV_VAR(ctx, varname): + '''Substitute an environment variable for any embedded variables''' + return subst_vars_error(ctx.env[varname], ctx.env) +Build.BuildContext.SUBST_ENV_VAR = SUBST_ENV_VAR + + +def recursive_dirlist(dir, relbase, pattern=None): + '''recursive directory list''' + ret = [] + for f in os.listdir(dir): + f2 = dir + '/' + f + if os.path.isdir(f2): + ret.extend(recursive_dirlist(f2, relbase)) + else: + if pattern and not fnmatch.fnmatch(f, pattern): + continue + ret.append(os.path.relpath(f2, relbase)) + return ret + + +def symlink(src, dst, force=True): + """Can create symlink by force""" + try: + os.symlink(src, dst) + except OSError as exc: + if exc.errno == errno.EEXIST and force: + os.remove(dst) + os.symlink(src, dst) + else: + raise + + +def mkdir_p(dir): + '''like mkdir -p''' + if not dir: + return + if dir.endswith("/"): + mkdir_p(dir[:-1]) + return + if os.path.isdir(dir): + return + mkdir_p(os.path.dirname(dir)) + os.mkdir(dir) + + +def SUBST_VARS_RECURSIVE(string, env): + '''recursively expand variables''' + if string is None: + return string + limit=100 + while (string.find('${') != -1 and limit > 0): + string = subst_vars_error(string, env) + limit -= 1 + return string + + +@conf +def EXPAND_VARIABLES(ctx, varstr, vars=None): + '''expand variables from a user supplied dictionary + + This is most useful when you pass vars=locals() to expand + all your local variables in strings + ''' + + if isinstance(varstr, list): + ret = [] + for s in varstr: + ret.append(EXPAND_VARIABLES(ctx, s, vars=vars)) + return ret + + if not isinstance(varstr, str): + return varstr + + env = ConfigSet.ConfigSet() + ret = varstr + # substitute on user supplied dict if available + if vars is not None: + for v in vars.keys(): + env[v] = vars[v] + ret = SUBST_VARS_RECURSIVE(ret, env) + + # if anything left, subst on the environment as well + if ret.find('${') != -1: + ret = SUBST_VARS_RECURSIVE(ret, ctx.env) + # make sure there is nothing left. 
Also check for the common + # typo of $( instead of ${ + if ret.find('${') != -1 or ret.find('$(') != -1: + Logs.error('Failed to substitute all variables in varstr=%s' % ret) + sys.exit(1) + return ret +Build.BuildContext.EXPAND_VARIABLES = EXPAND_VARIABLES + + +def RUN_COMMAND(cmd, + env=None, + shell=False): + '''run a external command, return exit code or signal''' + if env: + cmd = SUBST_VARS_RECURSIVE(cmd, env) + + status = os.system(cmd) + if os.WIFEXITED(status): + return os.WEXITSTATUS(status) + if os.WIFSIGNALED(status): + return - os.WTERMSIG(status) + Logs.error("Unknown exit reason %d for command: %s" % (status, cmd)) + return -1 + + +def RUN_PYTHON_TESTS(testfiles, pythonpath=None, extra_env=None): + env = LOAD_ENVIRONMENT() + if pythonpath is None: + pythonpath = os.path.join(Context.g_module.out, 'python') + result = 0 + for interp in env.python_interpreters: + if not isinstance(interp, str): + interp = ' '.join(interp) + for testfile in testfiles: + cmd = "PYTHONPATH=%s %s %s" % (pythonpath, interp, testfile) + if extra_env: + for key, value in extra_env.items(): + cmd = "%s=%s %s" % (key, value, cmd) + print('Running Python test with %s: %s' % (interp, testfile)) + ret = RUN_COMMAND(cmd) + if ret: + print('Python test failed: %s' % cmd) + result = ret + return result + + +# make sure we have md5. some systems don't have it +try: + from hashlib import md5 + # Even if hashlib.md5 exists, it may be unusable. + # Try to use MD5 function. In FIPS mode this will cause an exception + # and we'll get to the replacement code + foo = md5(b'abcd') +except: + try: + import md5 + # repeat the same check here, mere success of import is not enough. + # Try to use MD5 function. In FIPS mode this will cause an exception + foo = md5.md5(b'abcd') + except: + Context.SIG_NIL = hash('abcd') + class replace_md5(object): + def __init__(self): + self.val = None + def update(self, val): + self.val = hash((self.val, val)) + def digest(self): + return str(self.val) + def hexdigest(self): + return self.digest().encode('hex') + def replace_h_file(filename): + f = open(filename, 'rb') + m = replace_md5() + while (filename): + filename = f.read(100000) + m.update(filename) + f.close() + return m.digest() + Utils.md5 = replace_md5 + Task.md5 = replace_md5 + Utils.h_file = replace_h_file + + +def LOAD_ENVIRONMENT(): + '''load the configuration environment, allowing access to env vars + from new commands''' + env = ConfigSet.ConfigSet() + try: + p = os.path.join(Context.g_module.out, 'c4che/default'+CACHE_SUFFIX) + env.load(p) + except (OSError, IOError): + pass + return env + + +def IS_NEWER(bld, file1, file2): + '''return True if file1 is newer than file2''' + curdir = bld.path.abspath() + t1 = os.stat(os.path.join(curdir, file1)).st_mtime + t2 = os.stat(os.path.join(curdir, file2)).st_mtime + return t1 > t2 +Build.BuildContext.IS_NEWER = IS_NEWER + + +@conf +def RECURSE(ctx, directory): + '''recurse into a directory, relative to the curdir or top level''' + try: + visited_dirs = ctx.visited_dirs + except AttributeError: + visited_dirs = ctx.visited_dirs = set() + d = os.path.join(ctx.path.abspath(), directory) + if os.path.exists(d): + abspath = os.path.abspath(d) + else: + abspath = os.path.abspath(os.path.join(Context.g_module.top, directory)) + ctxclass = ctx.__class__.__name__ + key = ctxclass + ':' + abspath + if key in visited_dirs: + # already done it + return + visited_dirs.add(key) + relpath = os.path.relpath(abspath, ctx.path.abspath()) + if ctxclass in ['OptionsContext', + 
'ConfigurationContext', + 'BuildContext', + 'CleanContext', + 'InstallContext', + 'UninstallContext', + 'ListContext']: + return ctx.recurse(relpath) + if 'waflib.extras.compat15' in sys.modules: + return ctx.recurse(relpath) + raise Errors.WafError('Unknown RECURSE context class: {}'.format(ctxclass)) +Options.OptionsContext.RECURSE = RECURSE +Build.BuildContext.RECURSE = RECURSE + + +def CHECK_MAKEFLAGS(options): + '''check for MAKEFLAGS environment variable in case we are being + called from a Makefile try to honor a few make command line flags''' + if not 'WAF_MAKE' in os.environ: + return + makeflags = os.environ.get('MAKEFLAGS') + if makeflags is None: + makeflags = "" + jobs_set = False + jobs = None + # we need to use shlex.split to cope with the escaping of spaces + # in makeflags + for opt in shlex.split(makeflags): + # options can come either as -x or as x + if opt[0:2] == 'V=': + options.verbose = Logs.verbose = int(opt[2:]) + if Logs.verbose > 0: + Logs.zones = ['runner'] + if Logs.verbose > 2: + Logs.zones = ['*'] + elif opt[0].isupper() and opt.find('=') != -1: + # this allows us to set waf options on the make command line + # for example, if you do "make FOO=blah", then we set the + # option 'FOO' in Options.options, to blah. If you look in wafsamba/wscript + # you will see that the command line accessible options have their dest= + # set to uppercase, to allow for passing of options from make in this way + # this is also how "make test TESTS=testpattern" works, and + # "make VERBOSE=1" as well as things like "make SYMBOLCHECK=1" + loc = opt.find('=') + setattr(options, opt[0:loc], opt[loc+1:]) + elif opt[0] != '-': + for v in opt: + if re.search(r'j[0-9]*$', v): + jobs_set = True + jobs = opt.strip('j') + elif v == 'k': + options.keep = True + elif re.search(r'-j[0-9]*$', opt): + jobs_set = True + jobs = opt.strip('-j') + elif opt == '-k': + options.keep = True + if not jobs_set: + # default to one job + options.jobs = 1 + elif jobs_set and jobs: + options.jobs = int(jobs) + +waflib_options_parse_cmd_args = Options.OptionsContext.parse_cmd_args +def wafsamba_options_parse_cmd_args(self, _args=None, cwd=None, allow_unknown=False): + (options, commands, envvars) = \ + waflib_options_parse_cmd_args(self, + _args=_args, + cwd=cwd, + allow_unknown=allow_unknown) + CHECK_MAKEFLAGS(options) + if options.jobs == 1: + # + # waflib.Runner.Parallel processes jobs inline if the possible number + # of jobs is just 1. But (at least in waf <= 2.0.12) it still calls + # create a waflib.Runner.Spawner() which creates a single + # waflib.Runner.Consumer() thread that tries to process jobs from the + # queue. + # + # This has strange effects, which are not noticed typically, + # but at least on AIX python has broken threading and fails + # in random ways. + # + # So we just add a dummy Spawner class. 
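
To make the MAKEFLAGS handling above concrete, a small sketch (FakeOptions is a hypothetical stand-in for waf's options object, not a waf API):

import os

class FakeOptions(object):   # hypothetical stand-in
    jobs = 0
    keep = False

os.environ['WAF_MAKE'] = '1'
os.environ['MAKEFLAGS'] = '-j4 -k TESTS=samba3.smb2'
opts = FakeOptions()
CHECK_MAKEFLAGS(opts)
assert opts.jobs == 4 and opts.keep        # -j4 and -k were honoured
assert opts.TESTS == 'samba3.smb2'         # FOO=bar becomes an option
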
+ class NoOpSpawner(object): + def __init__(self, master): + return + from waflib import Runner + Runner.Spawner = NoOpSpawner + return options, commands, envvars +Options.OptionsContext.parse_cmd_args = wafsamba_options_parse_cmd_args + +option_groups = {} + +def option_group(opt, name): + '''find or create an option group''' + global option_groups + if name in option_groups: + return option_groups[name] + gr = opt.add_option_group(name) + option_groups[name] = gr + return gr +Options.OptionsContext.option_group = option_group + + +def save_file(filename, contents, create_dir=False): + '''save data to a file''' + if create_dir: + mkdir_p(os.path.dirname(filename)) + try: + f = open(filename, 'w') + f.write(contents) + f.close() + except: + return False + return True + + +def load_file(filename): + '''return contents of a file''' + try: + f = open(filename, 'r') + r = f.read() + f.close() + except: + return None + return r + + +def reconfigure(ctx): + '''rerun configure if necessary''' + if not os.path.exists(os.environ.get('WAFLOCK', '.lock-wscript')): + raise Errors.WafError('configure has not been run') + import samba_wildcard + bld = samba_wildcard.fake_build_environment() + Configure.autoconfig = True + Scripting.check_configured(bld) + + +def map_shlib_extension(ctx, name, python=False): + '''map a filename with a shared library extension of .so to the real shlib name''' + if name is None: + return None + if name[-1:].isdigit(): + # some libraries have specified versions in the wscript rule + return name + (root1, ext1) = os.path.splitext(name) + if python: + return ctx.env.pyext_PATTERN % root1 + else: + (root2, ext2) = os.path.splitext(ctx.env.cshlib_PATTERN) + return root1+ext2 +Build.BuildContext.map_shlib_extension = map_shlib_extension + +def apply_pattern(filename, pattern): + '''apply a filename pattern to a filename that may have a directory component''' + dirname = os.path.dirname(filename) + if not dirname: + return pattern % filename + basename = os.path.basename(filename) + return os.path.join(dirname, pattern % basename) + +def make_libname(ctx, name, nolibprefix=False, version=None, python=False): + """make a library filename + Options: + nolibprefix: don't include the lib prefix + version : add a version number + python : if we should use python module name conventions""" + + if python: + libname = apply_pattern(name, ctx.env.pyext_PATTERN) + else: + libname = apply_pattern(name, ctx.env.cshlib_PATTERN) + if nolibprefix and libname[0:3] == 'lib': + libname = libname[3:] + if version: + if version[0] == '.': + version = version[1:] + (root, ext) = os.path.splitext(libname) + if ext == ".dylib": + # special case - version goes before the prefix + libname = "%s.%s%s" % (root, version, ext) + else: + libname = "%s%s.%s" % (root, ext, version) + return libname +Build.BuildContext.make_libname = make_libname + + +def get_tgt_list(bld): + '''return a list of build objects for samba''' + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + # build a list of task generators we are interested in + tgt_list = [] + for tgt in targets: + type = targets[tgt] + if not type in ['SUBSYSTEM', 'BUILTIN', 'MODULE', 'BINARY', 'LIBRARY', 'PLUGIN', 'ASN1', 'PYTHON']: + continue + t = bld.get_tgen_by_name(tgt) + if t is None: + Logs.error("Target %s of type %s has no task generator" % (tgt, type)) + sys.exit(1) + tgt_list.append(t) + return tgt_list + +from waflib.Context import WSCRIPT_FILE +def PROCESS_SEPARATE_RULE(self, rule): + ''' cause waf to process additional script based on `rule'. 
You should have a file named wscript_<stage>_rule in the current directory + where stage is either 'configure' or 'build' + ''' + stage = '' + if isinstance(self, Configure.ConfigurationContext): + stage = 'configure' + elif isinstance(self, Build.BuildContext): + stage = 'build' + file_path = os.path.join(self.path.abspath(), WSCRIPT_FILE+'_'+stage+'_'+rule) + node = self.root.find_node(file_path) + if node: + try: + cache = self.recurse_cache + except AttributeError: + cache = self.recurse_cache = {} + if node not in cache: + cache[node] = True + self.pre_recurse(node) + try: + function_code = node.read('r', None) + exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict) + finally: + self.post_recurse(node) + +Build.BuildContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE +ConfigurationContext.PROCESS_SEPARATE_RULE = PROCESS_SEPARATE_RULE + +def AD_DC_BUILD_IS_ENABLED(self): + if self.CONFIG_SET('AD_DC_BUILD_IS_ENABLED'): + return True + return False + +Build.BuildContext.AD_DC_BUILD_IS_ENABLED = AD_DC_BUILD_IS_ENABLED + +@feature('cprogram', 'cshlib', 'cstaticlib') +@after('apply_lib_vars') +@before('apply_obj_vars') +def samba_before_apply_obj_vars(self): + """before apply_obj_vars for uselib, this removes the standard paths""" + + def is_standard_libpath(env, path): + normalized_path = os.path.normpath(path) + for _path in env.STANDARD_LIBPATH: + if _path == normalized_path: + return True + return False + + v = self.env + + for i in v['RPATH']: + if is_standard_libpath(v, i): + v['RPATH'].remove(i) + + for i in v['LIBPATH']: + if is_standard_libpath(v, i): + v['LIBPATH'].remove(i) + +# Samba options are mostly on by default (administrators and packagers +# specify features to remove, not add), which is why default=True + +def samba_add_onoff_option(opt, option, help=(), dest=None, default=True, + with_name="with", without_name="without"): + if default is None: + default_str = "auto" + elif default is True: + default_str = "yes" + elif default is False: + default_str = "no" + else: + default_str = str(default) + + if help == (): + help = ("Build with %s support (default=%s)" % (option, default_str)) + if dest is None: + dest = "with_%s" % option.replace('-', '_') + + with_val = "--%s-%s" % (with_name, option) + without_val = "--%s-%s" % (without_name, option) + + opt.add_option(with_val, help=help, action="store_true", dest=dest, + default=default) + opt.add_option(without_val, help=SUPPRESS_HELP, action="store_false", + dest=dest) +Options.OptionsContext.samba_add_onoff_option = samba_add_onoff_option diff --git a/buildtools/wafsamba/samba_version.py b/buildtools/wafsamba/samba_version.py new file mode 100644 index 0000000..31103e0 --- /dev/null +++ b/buildtools/wafsamba/samba_version.py @@ -0,0 +1,268 @@ +import os, sys +from waflib import Utils, Context +import samba_utils +from samba_git import find_git + +def git_version_summary(path, env=None): + git = find_git(env) + + if git is None: + return ("GIT-UNKNOWN", {}) + + env.GIT = git + + environ = dict(os.environ) + environ["GIT_DIR"] = '%s/.git' % path + environ["GIT_WORK_TREE"] = path + git = samba_utils.get_string(Utils.cmd_output(env.GIT + ' show --pretty=format:"%h%n%ct%n%H%n%cd" --stat HEAD', silent=True, env=environ)) + + lines = git.splitlines() + if not lines or len(lines) < 4: + return ("GIT-UNKNOWN", {}) + + fields = { + "GIT_COMMIT_ABBREV": lines[0], + "GIT_COMMIT_FULLREV": lines[2], + "COMMIT_TIME": int(lines[1]), + "COMMIT_DATE": lines[3], + } + + ret = "GIT-" + fields["GIT_COMMIT_ABBREV"] + + if 
env.GIT_LOCAL_CHANGES: + clean = Utils.cmd_output('%s diff HEAD | wc -l' % env.GIT, silent=True).strip() + if clean == "0": + fields["COMMIT_IS_CLEAN"] = 1 + else: + fields["COMMIT_IS_CLEAN"] = 0 + ret += "+" + + return (ret, fields) + + +def distversion_version_summary(path): + #get version from .distversion file + suffix = None + fields = {} + + for line in Utils.readf(path + '/.distversion').splitlines(): + if line == '': + continue + if line.startswith("#"): + continue + try: + split_line = line.split("=") + if split_line[1] != "": + key = split_line[0] + value = split_line[1] + if key == "SUFFIX": + suffix = value + continue + fields[key] = value + except: + print("Failed to parse line %s from .distversion file." % (line)) + raise + + if "COMMIT_TIME" in fields: + fields["COMMIT_TIME"] = int(fields["COMMIT_TIME"]) + + if suffix is None: + return ("UNKNOWN", fields) + + return (suffix, fields) + + +class SambaVersion(object): + + def __init__(self, version_dict, path, env=None, is_install=True): + '''Determine the version number of samba + +See VERSION for the format. Entries on that file are +also accepted as dictionary entries here + ''' + + self.MAJOR=None + self.MINOR=None + self.RELEASE=None + self.REVISION=None + self.TP_RELEASE=None + self.ALPHA_RELEASE=None + self.BETA_RELEASE=None + self.PRE_RELEASE=None + self.RC_RELEASE=None + self.IS_SNAPSHOT=True + self.RELEASE_NICKNAME=None + self.VENDOR_SUFFIX=None + self.VENDOR_PATCH=None + + for a, b in version_dict.items(): + if a.startswith("SAMBA_VERSION_"): + setattr(self, a[14:], b) + else: + setattr(self, a, b) + + if self.IS_GIT_SNAPSHOT == "yes": + self.IS_SNAPSHOT=True + elif self.IS_GIT_SNAPSHOT == "no": + self.IS_SNAPSHOT=False + else: + raise Exception("Unknown value for IS_GIT_SNAPSHOT: %s" % self.IS_GIT_SNAPSHOT) + + ## + ## start with "3.0.22" + ## + self.MAJOR=int(self.MAJOR) + self.MINOR=int(self.MINOR) + self.RELEASE=int(self.RELEASE) + + SAMBA_VERSION_STRING = ("%u.%u.%u" % (self.MAJOR, self.MINOR, self.RELEASE)) + +## +## maybe add "3.0.22a" or "4.0.0tp11" or "4.0.0alpha1" or "4.0.0beta1" or "3.0.22pre1" or "3.0.22rc1" +## We do not do pre or rc version on patch/letter releases +## + if self.REVISION is not None: + SAMBA_VERSION_STRING += self.REVISION + if self.TP_RELEASE is not None: + self.TP_RELEASE = int(self.TP_RELEASE) + SAMBA_VERSION_STRING += "tp%u" % self.TP_RELEASE + if self.ALPHA_RELEASE is not None: + self.ALPHA_RELEASE = int(self.ALPHA_RELEASE) + SAMBA_VERSION_STRING += ("alpha%u" % self.ALPHA_RELEASE) + if self.BETA_RELEASE is not None: + self.BETA_RELEASE = int(self.BETA_RELEASE) + SAMBA_VERSION_STRING += ("beta%u" % self.BETA_RELEASE) + if self.PRE_RELEASE is not None: + self.PRE_RELEASE = int(self.PRE_RELEASE) + SAMBA_VERSION_STRING += ("pre%u" % self.PRE_RELEASE) + if self.RC_RELEASE is not None: + self.RC_RELEASE = int(self.RC_RELEASE) + SAMBA_VERSION_STRING += ("rc%u" % self.RC_RELEASE) + + if self.IS_SNAPSHOT: + if not is_install: + suffix = "DEVELOPERBUILD" + self.vcs_fields = {} + elif os.path.exists(os.path.join(path, ".git")): + suffix, self.vcs_fields = git_version_summary(path, env=env) + elif os.path.exists(os.path.join(path, ".distversion")): + suffix, self.vcs_fields = distversion_version_summary(path) + else: + suffix = "UNKNOWN" + self.vcs_fields = {} + self.vcs_fields["SUFFIX"] = suffix + SAMBA_VERSION_STRING += "-" + suffix + else: + self.vcs_fields = {} + + self.OFFICIAL_STRING = SAMBA_VERSION_STRING + + if self.VENDOR_SUFFIX is not None: + SAMBA_VERSION_STRING += ("-" + 
self.VENDOR_SUFFIX) + self.VENDOR_SUFFIX = self.VENDOR_SUFFIX + + if self.VENDOR_PATCH is not None: + SAMBA_VERSION_STRING += ("-" + self.VENDOR_PATCH) + self.VENDOR_PATCH = self.VENDOR_PATCH + + self.STRING = SAMBA_VERSION_STRING + + if self.RELEASE_NICKNAME is not None: + self.STRING_WITH_NICKNAME = "%s (%s)" % (self.STRING, self.RELEASE_NICKNAME) + else: + self.STRING_WITH_NICKNAME = self.STRING + + def __str__(self): + string="/* Autogenerated by waf */\n" +\ + "#define SAMBA_COPYRIGHT_STRING \"%s\"\n" % self.SAMBA_COPYRIGHT_STRING +\ + "#define SAMBA_VERSION_MAJOR %u\n" % self.MAJOR +\ + "#define SAMBA_VERSION_MINOR %u\n" % self.MINOR +\ + "#define SAMBA_VERSION_RELEASE %u\n" % self.RELEASE + if self.REVISION is not None: + string+="#define SAMBA_VERSION_REVISION %u\n" % self.REVISION + + if self.TP_RELEASE is not None: + string+="#define SAMBA_VERSION_TP_RELEASE %u\n" % self.TP_RELEASE + + if self.ALPHA_RELEASE is not None: + string+="#define SAMBA_VERSION_ALPHA_RELEASE %u\n" % self.ALPHA_RELEASE + + if self.BETA_RELEASE is not None: + string+="#define SAMBA_VERSION_BETA_RELEASE %u\n" % self.BETA_RELEASE + + if self.PRE_RELEASE is not None: + string+="#define SAMBA_VERSION_PRE_RELEASE %u\n" % self.PRE_RELEASE + + if self.RC_RELEASE is not None: + string+="#define SAMBA_VERSION_RC_RELEASE %u\n" % self.RC_RELEASE + + for name in sorted(self.vcs_fields.keys()): + string+="#define SAMBA_VERSION_%s " % name + value = self.vcs_fields[name] + string_types = str + if sys.version_info[0] < 3: + string_types = basestring + if isinstance(value, string_types): + string += "\"%s\"" % value + elif type(value) is int: + string += "%d" % value + else: + raise Exception("Unknown type for %s: %r" % (name, value)) + string += "\n" + + string+="#define SAMBA_VERSION_OFFICIAL_STRING \"" + self.OFFICIAL_STRING + "\"\n" + + if self.VENDOR_SUFFIX is not None: + string+="#define SAMBA_VERSION_VENDOR_SUFFIX " + self.VENDOR_SUFFIX + "\n" + if self.VENDOR_PATCH is not None: + string+="#define SAMBA_VERSION_VENDOR_PATCH " + self.VENDOR_PATCH + "\n" + + if self.RELEASE_NICKNAME is not None: + string+="#define SAMBA_VERSION_RELEASE_NICKNAME " + self.RELEASE_NICKNAME + "\n" + + # We need to put this #ifdef in to the headers so that vendors can override the version with a function + string+=''' +#ifdef SAMBA_VERSION_VENDOR_FUNCTION +# define SAMBA_VERSION_STRING SAMBA_VERSION_VENDOR_FUNCTION +#else /* SAMBA_VERSION_VENDOR_FUNCTION */ +# define SAMBA_VERSION_STRING "''' + self.STRING_WITH_NICKNAME + '''" +#endif +''' + string+="/* Version for mkrelease.sh: \nSAMBA_VERSION_STRING=" + self.STRING_WITH_NICKNAME + "\n */\n" + + return string + + +def samba_version_file(version_file, path, env=None, is_install=True): + '''Parse the version information from a VERSION file''' + + with open(version_file, 'r') as f: + version_dict = {} + for line in f: + line = line.strip() + if line == '': + continue + if line.startswith("#"): + continue + try: + split_line = line.split("=") + if split_line[1] != "": + value = split_line[1].strip('"') + version_dict[split_line[0]] = value + except: + print("Failed to parse line %s from %s" % (line, version_file)) + raise + + return SambaVersion(version_dict, path, env=env, is_install=is_install) + + + +def load_version(env=None, is_install=True): + '''load samba versions either from ./VERSION or git + return a version object for detailed breakdown''' + if not env: + env = samba_utils.LOAD_ENVIRONMENT() + + version = samba_version_file("./VERSION", ".", env, is_install=is_install) + 
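+ # Illustrative example (hypothetical values): a VERSION file setting + # SAMBA_VERSION_MAJOR=4, SAMBA_VERSION_MINOR=0, SAMBA_VERSION_RELEASE=0, + # SAMBA_VERSION_ALPHA_RELEASE=1 and SAMBA_VERSION_IS_GIT_SNAPSHOT=yes, + # built from a git checkout, yields a version.STRING such as + # "4.0.0alpha1-GIT-1a2b3c4", per the SambaVersion logic above.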
Context.g_module.VERSION = version.STRING + return version diff --git a/buildtools/wafsamba/samba_waf18.py b/buildtools/wafsamba/samba_waf18.py new file mode 100644 index 0000000..54444b3 --- /dev/null +++ b/buildtools/wafsamba/samba_waf18.py @@ -0,0 +1,433 @@ +# compatibility layer for building with more recent waf versions + +import os, shlex, sys +from waflib import Build, Configure, Node, Utils, Options, Logs, TaskGen +from waflib import ConfigSet +from waflib.TaskGen import feature, after +from waflib.Configure import conf, ConfigurationContext + +from waflib.Tools.flex import decide_ext + +# This version of flexfun runs in tsk.get_cwd() as opposed to the +# bld.variant_dir: since input paths are adjusted against tsk.get_cwd(), we have to +# use tsk.get_cwd() for the work directory as well. +def flexfun(tsk): + env = tsk.env + bld = tsk.generator.bld + def to_list(xx): + if isinstance(xx, str): + return [xx] + return xx + tsk.last_cmd = lst = [] + lst.extend(to_list(env.FLEX)) + lst.extend(to_list(env.FLEXFLAGS)) + inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs] + if env.FLEX_MSYS: + inputs = [x.replace(os.sep, '/') for x in inputs] + lst.extend(inputs) + lst = [x for x in lst if x] + txt = bld.cmd_and_log(lst, cwd=tsk.get_cwd(), env=env.env or None, quiet=0) + tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207 + +TaskGen.declare_chain( + name = 'flex', + rule = flexfun, # issue #854 + ext_in = '.l', + decider = decide_ext, +) + +Build.BuildContext.variant = 'default' +Build.CleanContext.variant = 'default' +Build.InstallContext.variant = 'default' +Build.UninstallContext.variant = 'default' +Build.ListContext.variant = 'default' + +def abspath(self, env=None): + if env and hasattr(self, 'children'): + return self.get_bld().abspath() + return self.old_abspath() +Node.Node.old_abspath = Node.Node.abspath +Node.Node.abspath = abspath + +def bldpath(self, env=None): + return self.abspath() + #return self.path_from(self.ctx.bldnode.parent) +Node.Node.bldpath = bldpath + +def srcpath(self, env=None): + return self.abspath() + #return self.path_from(self.ctx.bldnode.parent) +Node.Node.srcpath = srcpath + +def store_fast(self, filename): + file = open(filename, 'wb') + data = self.get_merged_dict() + try: + Build.cPickle.dump(data, file, -1) + finally: + file.close() +ConfigSet.ConfigSet.store_fast = store_fast + +def load_fast(self, filename): + file = open(filename, 'rb') + try: + data = Build.cPickle.load(file) + finally: + file.close() + self.table.update(data) +ConfigSet.ConfigSet.load_fast = load_fast + +@feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') +@after('propagate_uselib_vars', 'process_source') +def apply_incpaths(self): + lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES']) + self.includes_nodes = lst + cwdx = getattr(self.bld, 'cwdx', self.bld.bldnode) + self.env['INCPATHS'] = [x.path_from(cwdx) for x in lst] + +@conf +def define(self, key, val, quote=True, comment=None): + assert key and isinstance(key, str) + + if val is None: + val = () + elif isinstance(val, bool): + val = int(val) + + # waf 1.5 + self.env[key] = val + + if isinstance(val, int) or isinstance(val, float): + s = '%s=%s' + else: + s = quote and '%s="%s"' or '%s=%s' + app = s % (key, str(val)) + + ban = key + '=' + lst = self.env.DEFINES + for x in lst: + if x.startswith(ban): + lst[lst.index(x)] = app + break + else: + self.env.append_value('DEFINES', app) + + self.env.append_unique('define_key', key) + +# compat15 removes this but 
we want to keep it +@conf +def undefine(self, key, from_env=True, comment=None): + assert key and isinstance(key, str) + + ban = key + '=' + self.env.DEFINES = [x for x in self.env.DEFINES if not x.startswith(ban)] + self.env.append_unique('define_key', key) + # waf 1.5 + if from_env: + self.env[key] = () + +class ConfigurationContext(Configure.ConfigurationContext): + def init_dirs(self): + self.setenv('default') + self.env.merge_config_header = True + return super(ConfigurationContext, self).init_dirs() + +def find_program_samba(self, *k, **kw): + # Override the waf default set in the @conf decorator in Configure.py + if 'mandatory' not in kw: + kw['mandatory'] = False + ret = self.find_program_old(*k, **kw) + return ret +Configure.ConfigurationContext.find_program_old = Configure.ConfigurationContext.find_program +Configure.ConfigurationContext.find_program = find_program_samba + +Build.BuildContext.ENFORCE_GROUP_ORDERING = Utils.nada +Build.BuildContext.AUTOCLEANUP_STALE_FILES = Utils.nada + +@conf +def check(self, *k, **kw): + '''Override the waf defaults to inject --with-directory options''' + + # match the configuration test with specific options, for example: + # --with-libiconv -> Options.options.iconv_open -> "Checking for library iconv" + self.validate_c(kw) + + additional_dirs = [] + if 'msg' in kw: + msg = kw['msg'] + for x in Options.OptionsContext.parser.parser.option_list: + if getattr(x, 'match', None) and msg in x.match: + d = getattr(Options.options, x.dest, '') + if d: + additional_dirs.append(d) + + # we add the additional dirs twice: once for the test data, and again if the compilation test succeeds below + def add_options_dir(dirs, env): + for x in dirs: + if not x in env.CPPPATH: + env.CPPPATH = [os.path.join(x, 'include')] + env.CPPPATH + if not x in env.LIBPATH: + env.LIBPATH = [os.path.join(x, 'lib')] + env.LIBPATH + + add_options_dir(additional_dirs, kw['env']) + + self.start_msg(kw['msg'], **kw) + ret = None + try: + ret = self.run_build(*k, **kw) + except self.errors.ConfigurationError: + self.end_msg(kw['errmsg'], 'YELLOW', **kw) + if Logs.verbose > 1: + raise + else: + self.fatal('The configuration failed') + else: + kw['success'] = ret + # success! 
time for brandy + add_options_dir(additional_dirs, self.env) + + ret = self.post_check(*k, **kw) + if not ret: + self.end_msg(kw['errmsg'], 'YELLOW', **kw) + self.fatal('The configuration failed %r' % ret) + else: + self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) + return ret + +@conf +def CHECK_LIBRARY_SUPPORT(conf, rpath=False, version_script=False, msg=None): + '''see if the platform supports building libraries''' + + if msg is None: + if rpath: + msg = "rpath library support" + else: + msg = "building library support" + + def build(bld): + lib_node = bld.srcnode.make_node('libdir/liblc1.c') + lib_node.parent.mkdir() + lib_node.write('int lib_func(void) { return 42; }\n', 'w') + main_node = bld.srcnode.make_node('main.c') + main_node.write('int lib_func(void);\n' + 'int main(void) {return !(lib_func() == 42);}', 'w') + linkflags = [] + if version_script: + script = bld.srcnode.make_node('ldscript') + script.write('TEST_1.0A2 { global: *; };\n', 'w') + linkflags.append('-Wl,--version-script=%s' % script.abspath()) + bld(features='c cshlib', source=lib_node, target='lib1', linkflags=linkflags, name='lib1') + o = bld(features='c cprogram', source=main_node, target='prog1', uselib_local='lib1') + if rpath: + o.rpath = [lib_node.parent.abspath()] + def run_app(self): + args = conf.SAMBA_CROSS_ARGS(msg=msg) + env = dict(os.environ) + env['LD_LIBRARY_PATH'] = self.inputs[0].parent.abspath() + os.pathsep + env.get('LD_LIBRARY_PATH', '') + self.generator.bld.cmd_and_log([self.inputs[0].abspath()] + args, env=env) + o.post() + bld(rule=run_app, source=o.link_task.outputs[0]) + + # ok, so it builds + try: + conf.check(build_fun=build, msg='Checking for %s' % msg) + except conf.errors.ConfigurationError: + return False + return True + +@conf +def CHECK_NEED_LC(conf, msg): + '''check if we need -lc''' + def build(bld): + lib_node = bld.srcnode.make_node('libdir/liblc1.c') + lib_node.parent.mkdir() + lib_node.write('#include <stdio.h>\nint lib_func(void) { FILE *f = fopen("foo", "r");}\n', 'w') + bld(features='c cshlib', source=[lib_node], linkflags=conf.env.EXTRA_LDFLAGS, target='liblc') + try: + conf.check(build_fun=build, msg=msg, okmsg='-lc is unnecessary', errmsg='-lc is necessary') + except conf.errors.ConfigurationError: + return False + return True + +# already implemented on "waf -v" +def order(bld, tgt_list): + return True +Build.BuildContext.check_group_ordering = order + +@conf +def CHECK_CFG(self, *k, **kw): + if 'args' in kw: + kw['args'] = shlex.split(kw['args']) + if not 'mandatory' in kw: + kw['mandatory'] = False + kw['global_define'] = True + return self.check_cfg(*k, **kw) + +def cmd_output(cmd, **kw): + + silent = False + if 'silent' in kw: + silent = kw['silent'] + del(kw['silent']) + + if 'e' in kw: + tmp = kw['e'] + del(kw['e']) + kw['env'] = tmp + + kw['shell'] = isinstance(cmd, str) + kw['stdout'] = Utils.subprocess.PIPE + if silent: + kw['stderr'] = Utils.subprocess.PIPE + + try: + p = Utils.subprocess.Popen(cmd, **kw) + output = p.communicate()[0] + except OSError as e: + raise ValueError(str(e)) + + if p.returncode: + if not silent: + msg = "command execution failed: %s -> %r" % (cmd, str(output)) + raise ValueError(msg) + output = '' + return output +Utils.cmd_output = cmd_output + + +@TaskGen.feature('c', 'cxx', 'd') +@TaskGen.before('apply_incpaths', 'propagate_uselib_vars') +@TaskGen.after('apply_link', 'process_source') +def apply_uselib_local(self): + """ + process the uselib_local attribute + execute after apply_link because of the execution order set on 
'link_task' + """ + env = self.env + from waflib.Tools.ccroot import stlink_task + + # 1. the case of the libs defined in the project (visit ancestors first) + # the ancestors external libraries (uselib) will be prepended + self.uselib = self.to_list(getattr(self, 'uselib', [])) + self.includes = self.to_list(getattr(self, 'includes', [])) + names = self.to_list(getattr(self, 'uselib_local', [])) + get = self.bld.get_tgen_by_name + seen = set() + seen_uselib = set() + tmp = Utils.deque(names) # consume a copy of the list of names + if tmp: + if Logs.verbose: + Logs.warn('compat: "uselib_local" is deprecated, replace by "use"') + while tmp: + lib_name = tmp.popleft() + # visit dependencies only once + if lib_name in seen: + continue + + y = get(lib_name) + y.post() + seen.add(lib_name) + + # object has ancestors to process (shared libraries): add them to the end of the list + if getattr(y, 'uselib_local', None): + for x in self.to_list(getattr(y, 'uselib_local', [])): + obj = get(x) + obj.post() + if getattr(obj, 'link_task', None): + if not isinstance(obj.link_task, stlink_task): + tmp.append(x) + + # link task and flags + if getattr(y, 'link_task', None): + + link_name = y.target[y.target.rfind(os.sep) + 1:] + if isinstance(y.link_task, stlink_task): + env.append_value('STLIB', [link_name]) + else: + # some linkers can link against programs + env.append_value('LIB', [link_name]) + + # the order + self.link_task.set_run_after(y.link_task) + + # for the recompilation + self.link_task.dep_nodes += y.link_task.outputs + + # add the link path too + tmp_path = y.link_task.outputs[0].parent.bldpath() + if not tmp_path in env['LIBPATH']: + env.prepend_value('LIBPATH', [tmp_path]) + + # add ancestors uselib too - but only propagate those that have no staticlib defined + for v in self.to_list(getattr(y, 'uselib', [])): + if v not in seen_uselib: + seen_uselib.add(v) + if not env['STLIB_' + v]: + if not v in self.uselib: + self.uselib.insert(0, v) + + # if the library task generator provides 'export_includes', add to the include path + # the export_includes must be a list of paths relative to the other library + if getattr(y, 'export_includes', None): + self.includes.extend(y.to_incnodes(y.export_includes)) + +@TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib') +@TaskGen.after('apply_link') +def apply_objdeps(self): + "add the .o files produced by some other object files in the same manner as uselib_local" + names = getattr(self, 'add_objects', []) + if not names: + return + names = self.to_list(names) + + get = self.bld.get_tgen_by_name + seen = [] + while names: + x = names[0] + + # visit dependencies only once + if x in seen: + names = names[1:] + continue + + # object does not exist ? + y = get(x) + + # object has ancestors to process first ? 
update the list of names + if getattr(y, 'add_objects', None): + added = 0 + lst = y.to_list(y.add_objects) + lst.reverse() + for u in lst: + if u in seen: + continue + added = 1 + names = [u]+names + if added: + continue # list of names modified, loop + + # safe to process the current object + y.post() + seen.append(x) + + for t in getattr(y, 'compiled_tasks', []): + self.link_task.inputs.extend(t.outputs) + +@TaskGen.after('apply_link') +def process_obj_files(self): + if not hasattr(self, 'obj_files'): + return + for x in self.obj_files: + node = self.path.find_resource(x) + self.link_task.inputs.append(node) + +@TaskGen.taskgen_method +def add_obj_file(self, file): + """Small example on how to link object files as if they were source + obj = bld.create_obj('cc') + obj.add_obj_file('foo.o')""" + if not hasattr(self, 'obj_files'): + self.obj_files = [] + if not 'process_obj_files' in self.meths: + self.meths.append('process_obj_files') + self.obj_files.append(file) diff --git a/buildtools/wafsamba/samba_wildcard.py b/buildtools/wafsamba/samba_wildcard.py new file mode 100644 index 0000000..1ea2803 --- /dev/null +++ b/buildtools/wafsamba/samba_wildcard.py @@ -0,0 +1,151 @@ +# based on playground/evil in the waf svn tree + +import os, datetime, fnmatch +from waflib import Scripting, Utils, Options, Logs, Errors +from waflib import ConfigSet, Context +from samba_utils import LOCAL_CACHE + +def run_task(t, k): + '''run a single build task''' + ret = t.run() + if ret: + raise Errors.WafError("Failed to build %s: %u" % (k, ret)) + + +def run_named_build_task(cmd): + '''run a named build task, matching the cmd name using fnmatch + wildcards against inputs and outputs of all build tasks''' + bld = fake_build_environment(info=False) + found = False + cwd_node = bld.root.find_dir(os.getcwd()) + top_node = bld.root.find_dir(bld.srcnode.abspath()) + + cmd = os.path.normpath(cmd) + + # cope with builds of bin/*/* + if os.path.islink(cmd): + cmd = os.path.relpath(os.readlink(cmd), os.getcwd()) + + if cmd[0:12] == "bin/default/": + cmd = cmd[12:] + + for g in bld.task_manager.groups: + for attr in ['outputs', 'inputs']: + for t in g.tasks: + s = getattr(t, attr, []) + for k in s: + relpath1 = k.relpath_gen(cwd_node) + relpath2 = k.relpath_gen(top_node) + if (fnmatch.fnmatch(relpath1, cmd) or + fnmatch.fnmatch(relpath2, cmd)): + t.position = [0,0] + print(t.display()) + run_task(t, k) + found = True + + + if not found: + raise Errors.WafError("Unable to find build target matching %s" % cmd) + + +def rewrite_compile_targets(): + '''cope with the bin/ form of compile target''' + if not Options.options.compile_targets: + return + + bld = fake_build_environment(info=False) + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + tlist = [] + + for t in Options.options.compile_targets.split(','): + if not os.path.islink(t): + tlist.append(t) + continue + link = os.readlink(t) + list = link.split('/') + for name in [list[-1], '/'.join(list[-2:])]: + if name in targets: + tlist.append(name) + continue + Options.options.compile_targets = ",".join(tlist) + + + +def wildcard_main(missing_cmd_fn): + '''this replaces main from Scripting, allowing us to override the + behaviour for unknown commands + + If an unknown command is found, then missing_cmd_fn() is called with + the name of the requested command + ''' + Scripting.commands = Options.arg_line[:] + + # rewrite the compile targets to cope with the bin/xx form + rewrite_compile_targets() + + while Scripting.commands: + x = Scripting.commands.pop(0) + + ini = 
datetime.datetime.now() + if x == 'configure': + fun = Scripting.configure + elif x == 'build': + fun = Scripting.build + else: + fun = getattr(Utils.g_module, x, None) + + # this is the new addition on top of main from Scripting.py + if not fun: + missing_cmd_fn(x) + break + + ctx = getattr(Utils.g_module, x + '_context', Utils.Context)() + + if x in ['init', 'shutdown', 'dist', 'distclean', 'distcheck']: + try: + fun(ctx) + except TypeError: + fun() + else: + fun(ctx) + + ela = '' + if not Options.options.progress_bar: + ela = ' (%s)' % Utils.get_elapsed_time(ini) + + if x != 'init' and x != 'shutdown': + Logs.info('%r finished successfully%s' % (x, ela)) + + if not Scripting.commands and x != 'shutdown': + Scripting.commands.append('shutdown') + + + + +def fake_build_environment(info=True, flush=False): + """create all the tasks for the project, but do not run the build; + return the build context in use""" + bld = getattr(Context.g_module, 'build_context', Utils.Context)() + bld = Scripting.check_configured(bld) + + Options.commands['install'] = False + Options.commands['uninstall'] = False + + bld.is_install = 0 # False + + try: + proj = ConfigSet.ConfigSet(Options.lockfile) + except IOError: + raise Errors.WafError("Project not configured (run './configure' first)") + + bld.load_envs() + + if info: + Logs.info("Waf: Entering directory `%s'" % bld.bldnode.abspath()) + bld.add_subdirs([os.path.split(Context.g_module.root_path)[0]]) + + bld.pre_build() + if flush: + bld.flush() + return bld + diff --git a/buildtools/wafsamba/stale_files.py b/buildtools/wafsamba/stale_files.py new file mode 100644 index 0000000..ac2c2a7 --- /dev/null +++ b/buildtools/wafsamba/stale_files.py @@ -0,0 +1,114 @@ +# encoding: utf-8 +# Thomas Nagy, 2006-2010 (ita) + +""" +Add a pre-build hook to remove all build files +which do not have a corresponding target + +This can be used for example to remove the targets +that have changed name without performing +a full 'waf clean' + +Of course, it will only work if there are no dynamically generated +nodes/tasks, in which case the method will have to be modified +to exclude some folders for example. 
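+ +For example, if a library target is renamed, the shared object from the +previous build would otherwise linger under bin/; this hook notices that the +old file is no longer among the expected build outputs and removes it before +the build starts.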
+""" + +from waflib import Logs, Build, Options, Utils, Errors +import os +from wafsamba import samba_utils +from Runner import Parallel + +old_refill_task_list = Parallel.refill_task_list +def replace_refill_task_list(self): + '''replacement for refill_task_list() that deletes stale files''' + + iit = old_refill_task_list(self) + bld = self.bld + + if not getattr(bld, 'new_rules', False): + # we only need to check for stale files if the build rules changed + return iit + + if Options.options.compile_targets: + # not safe when --target is used + return iit + + # execute only once + if getattr(self, 'cleanup_done', False): + return iit + self.cleanup_done = True + + def group_name(g): + tm = self.bld.task_manager + return [x for x in tm.groups_names if id(tm.groups_names[x]) == id(g)][0] + + bin_base = bld.bldnode.abspath() + bin_base_len = len(bin_base) + + # paranoia + if bin_base[-4:] != '/bin': + raise Errors.WafError("Invalid bin base: %s" % bin_base) + + # obtain the expected list of files + expected = [] + for i in range(len(bld.task_manager.groups)): + g = bld.task_manager.groups[i] + tasks = g.tasks_gen + for x in tasks: + try: + if getattr(x, 'target'): + tlist = samba_utils.TO_LIST(getattr(x, 'target')) + ttype = getattr(x, 'samba_type', None) + task_list = getattr(x, 'compiled_tasks', []) + if task_list: + # this gets all of the .o files, including the task + # ids, so foo.c maps to foo_3.o for idx=3 + for tsk in task_list: + for output in tsk.outputs: + objpath = os.path.normpath(output.abspath(bld.env)) + expected.append(objpath) + for t in tlist: + if ttype in ['LIBRARY', 'PLUGIN', 'MODULE']: + t = samba_utils.apply_pattern(t, bld.env.shlib_PATTERN) + if ttype == 'PYTHON': + t = samba_utils.apply_pattern(t, bld.env.pyext_PATTERN) + p = os.path.join(x.path.abspath(bld.env), t) + p = os.path.normpath(p) + expected.append(p) + for n in x.allnodes: + p = n.abspath(bld.env) + if p[0:bin_base_len] == bin_base: + expected.append(p) + except: + pass + + for root, dirs, files in os.walk(bin_base): + for f in files: + p = root + '/' + f + if os.path.islink(p): + link = os.readlink(p) + if link[0:bin_base_len] == bin_base: + p = link + if f in ['config.h']: + continue + (froot, fext) = os.path.splitext(f) + if fext not in [ '.c', '.h', '.so', '.o' ]: + continue + if f[-7:] == '.inst.h': + continue + if p.find("/.conf") != -1: + continue + if not p in expected and os.path.exists(p): + Logs.warn("Removing stale file: %s" % p) + os.unlink(p) + return iit + + +def AUTOCLEANUP_STALE_FILES(bld): + """automatically clean up any files in bin that shouldn't be there""" + global old_refill_task_list + old_refill_task_list = Parallel.refill_task_list + Parallel.refill_task_list = replace_refill_task_list + Parallel.bld = bld +Build.BuildContext.AUTOCLEANUP_STALE_FILES = AUTOCLEANUP_STALE_FILES diff --git a/buildtools/wafsamba/symbols.py b/buildtools/wafsamba/symbols.py new file mode 100644 index 0000000..99e121c --- /dev/null +++ b/buildtools/wafsamba/symbols.py @@ -0,0 +1,659 @@ +# a waf tool to extract symbols from object files or libraries +# using nm, producing a set of exposed defined/undefined symbols + +import os, re, subprocess +from waflib import Utils, Build, Options, Logs, Errors +from waflib.Logs import debug +from samba_utils import TO_LIST, LOCAL_CACHE, get_tgt_list + +# these are the data structures used in symbols.py: +# +# bld.env.symbol_map : dictionary mapping public symbol names to list of +# subsystem names where that symbol exists +# +# t.in_library : list of libraries that t 
is in + # + # bld.env.public_symbols: set of public symbols for each subsystem + # bld.env.used_symbols : set of used symbols for each subsystem + # + # bld.env.syslib_symbols: dictionary mapping system library name to set of symbols + # for that library + # bld.env.library_dict : dictionary mapping built library paths to subsystem names + # + # LOCAL_CACHE(bld, 'TARGET_TYPE') : dictionary mapping subsystem name to target type + + +def symbols_extract(bld, objfiles, dynamic=False): + '''extract symbols from objfile, returning a dictionary containing + the set of undefined and public symbols for each file''' + + ret = {} + + # see if we can get some results from the nm cache + if not bld.env.nm_cache: + bld.env.nm_cache = {} + + objfiles = set(objfiles).copy() + + remaining = set() + for obj in objfiles: + if obj in bld.env.nm_cache: + ret[obj] = bld.env.nm_cache[obj].copy() + else: + remaining.add(obj) + objfiles = remaining + + if len(objfiles) == 0: + return ret + + cmd = ["nm"] + if dynamic: + # needed for some .so files + cmd.append("-D") + cmd.extend(list(objfiles)) + + nmpipe = subprocess.Popen(cmd, stdout=subprocess.PIPE).stdout + if len(objfiles) == 1: + filename = list(objfiles)[0] + ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set()} + + for line in nmpipe: + line = line.strip() + if line.endswith(b':'): + filename = line[:-1] + ret[filename] = { "PUBLIC": set(), "UNDEFINED" : set() } + continue + cols = line.split(b" ") + if cols == [b'']: + continue + # see if the line starts with an address + if len(cols) == 3: + symbol_type = cols[1] + symbol = cols[2] + else: + symbol_type = cols[0] + symbol = cols[1] + if symbol_type in b"BDGTRVWSi": + # it's a public symbol + ret[filename]["PUBLIC"].add(symbol) + elif symbol_type in b"U": + ret[filename]["UNDEFINED"].add(symbol) + + # add to the cache + for obj in objfiles: + if obj in ret: + bld.env.nm_cache[obj] = ret[obj].copy() + else: + bld.env.nm_cache[obj] = { "PUBLIC": set(), "UNDEFINED" : set() } + + return ret + + +def real_name(name): + if name.find(".objlist") != -1: + name = name[:-8] + return name + + +def find_ldd_path(bld, libname, binary): + '''find the path to the syslib we will link against''' + ret = None + if not bld.env.syslib_paths: + bld.env.syslib_paths = {} + if libname in bld.env.syslib_paths: + return bld.env.syslib_paths[libname] + + lddpipe = subprocess.Popen(['ldd', binary], stdout=subprocess.PIPE).stdout + for line in lddpipe: + line = line.strip() + cols = line.split(b" ") + if len(cols) < 3 or cols[1] != b"=>": + continue + if cols[0].startswith(b"libc."): + # save this one too + bld.env.libc_path = cols[2] + if cols[0].startswith(libname): + ret = cols[2] + bld.env.syslib_paths[libname] = ret + return ret + + +# some regular expressions for parsing readelf output +re_sharedlib = re.compile(rb'Shared library: \[(.*)\]') +# output from readelf could be `Library rpath` or `Library runpath` +re_rpath = re.compile(rb'Library (rpath|runpath): \[(.*)\]') + +def get_libs(bld, binname): + '''find the list of linked libraries for any binary or library + binname is the path to the binary/library on disk + + We do this using readelf instead of ldd as we need to avoid recursing + into system libraries + ''' + + # see if we can get the result from the ldd cache + if not bld.env.lib_cache: + bld.env.lib_cache = {} + if binname in bld.env.lib_cache: + return bld.env.lib_cache[binname].copy() + + rpath = [] + libs = set() + + elfpipe = subprocess.Popen(['readelf', '--dynamic', binname], stdout=subprocess.PIPE).stdout + for line 
in elfpipe: + m = re_sharedlib.search(line) + if m: + libs.add(m.group(1)) + m = re_rpath.search(line) + if m: + # output from Popen is always bytestr even in py3 + rpath.extend(m.group(2).split(b":")) + + ret = set() + for lib in libs: + found = False + for r in rpath: + path = os.path.join(r, lib) + if os.path.exists(path): + ret.add(os.path.realpath(path)) + found = True + break + if not found: + # we didn't find this lib using rpath. It is probably a system + # library, so to find the path to it we either need to use ldd + # or we need to start parsing /etc/ld.so.conf* ourselves. We'll + # use ldd for now, even though it is slow + path = find_ldd_path(bld, lib, binname) + if path: + ret.add(os.path.realpath(path)) + + bld.env.lib_cache[binname] = ret.copy() + + return ret + + +def get_libs_recursive(bld, binname, seen): + '''find the recursive list of linked libraries for any binary or library + binname is the path to the binary/library on disk. seen is a set used + to prevent loops + ''' + if binname in seen: + return set() + ret = get_libs(bld, binname) + seen.add(binname) + for lib in ret: + # we don't want to recurse into system libraries. If a system + # library that we use (eg. libcups) happens to use another library + # (such as libkrb5) which contains common symbols with our own + # libraries, then that is not an error + if lib in bld.env.library_dict: + ret = ret.union(get_libs_recursive(bld, lib, seen)) + return ret + + + +def find_syslib_path(bld, libname, deps): + '''find the path to the syslib we will link against''' + # the strategy is to use the targets that depend on the library, and run ldd + # on it to find the real location of the library that is used + + linkpath = deps[0].link_task.outputs[0].abspath(bld.env) + + if libname == "python": + libname += bld.env.PYTHON_VERSION + + return find_ldd_path(bld, "lib%s" % libname.lower(), linkpath) + + +def build_symbol_sets(bld, tgt_list): + '''build the public_symbols and undefined_symbols attributes for each target''' + + if bld.env.public_symbols: + return + + objlist = [] # list of object file + objmap = {} # map from object filename to target (subsystem) name + + for t in tgt_list: + t.public_symbols = set() + t.undefined_symbols = set() + t.used_symbols = set() + for tsk in getattr(t, 'compiled_tasks', []): + for output in tsk.outputs: + objpath = output.abspath(bld.env) + objlist.append(objpath) + objmap[objpath] = t + + symbols = symbols_extract(bld, objlist) + for obj in objlist: + t = objmap[obj] + t.public_symbols = t.public_symbols.union(symbols[obj]["PUBLIC"]) + t.undefined_symbols = t.undefined_symbols.union(symbols[obj]["UNDEFINED"]) + t.used_symbols = t.used_symbols.union(symbols[obj]["UNDEFINED"]) + + t.undefined_symbols = t.undefined_symbols.difference(t.public_symbols) + + # and the reverse map of public symbols to subsystem name + bld.env.symbol_map = {} + + for t in tgt_list: + for s in t.public_symbols: + if not s in bld.env.symbol_map: + bld.env.symbol_map[s] = [] + bld.env.symbol_map[s].append(real_name(t.sname)) + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + bld.env.public_symbols = {} + for t in tgt_list: + name = real_name(t.sname) + if name in bld.env.public_symbols: + bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t.public_symbols) + else: + bld.env.public_symbols[name] = t.public_symbols + if t.samba_type in ['LIBRARY', 'PLUGIN']: + for dep in t.add_objects: + t2 = bld.get_tgen_by_name(dep) + bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, 
dep)) + bld.env.public_symbols[name] = bld.env.public_symbols[name].union(t2.public_symbols) + + bld.env.used_symbols = {} + for t in tgt_list: + name = real_name(t.sname) + if name in bld.env.used_symbols: + bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t.used_symbols) + else: + bld.env.used_symbols[name] = t.used_symbols + if t.samba_type in ['LIBRARY', 'PLUGIN']: + for dep in t.add_objects: + t2 = bld.get_tgen_by_name(dep) + bld.ASSERT(t2 is not None, "Library '%s' has unknown dependency '%s'" % (name, dep)) + bld.env.used_symbols[name] = bld.env.used_symbols[name].union(t2.used_symbols) + + +def build_library_dict(bld, tgt_list): + '''build the library_dict dictionary''' + + if bld.env.library_dict: + return + + bld.env.library_dict = {} + + for t in tgt_list: + if t.samba_type in [ 'LIBRARY', 'PLUGIN', 'PYTHON' ]: + linkpath = os.path.realpath(t.link_task.outputs[0].abspath(bld.env)) + bld.env.library_dict[linkpath] = t.sname + + +def build_syslib_sets(bld, tgt_list): + '''build the public_symbols for all syslibs''' + + if bld.env.syslib_symbols: + return + + # work out what syslibs we depend on, and what targets those are used in + syslibs = {} + objmap = {} + for t in tgt_list: + if getattr(t, 'uselib', []) and t.samba_type in [ 'LIBRARY', 'PLUGIN', 'BINARY', 'PYTHON' ]: + for lib in t.uselib: + if lib in ['PYEMBED', 'PYEXT']: + lib = "python" + if not lib in syslibs: + syslibs[lib] = [] + syslibs[lib].append(t) + + # work out the paths to each syslib + syslib_paths = [] + for lib in syslibs: + path = find_syslib_path(bld, lib, syslibs[lib]) + if path is None: + Logs.warn("Unable to find syslib path for %s" % lib) + if path is not None: + syslib_paths.append(path) + objmap[path] = lib.lower() + + # add in libc + syslib_paths.append(bld.env.libc_path) + objmap[bld.env.libc_path] = 'c' + + symbols = symbols_extract(bld, syslib_paths, dynamic=True) + + # keep a map of syslib names to public symbols + bld.env.syslib_symbols = {} + for lib in symbols: + bld.env.syslib_symbols[lib] = symbols[lib]["PUBLIC"] + + # add to the map of symbols to dependencies + for lib in symbols: + for sym in symbols[lib]["PUBLIC"]: + if not sym in bld.env.symbol_map: + bld.env.symbol_map[sym] = [] + bld.env.symbol_map[sym].append(objmap[lib]) + + # keep the libc symbols as well, as these are useful for some of the + # sanity checks + bld.env.libc_symbols = symbols[bld.env.libc_path]["PUBLIC"] + + # add to the combined map of dependency name to public_symbols + for lib in bld.env.syslib_symbols: + bld.env.public_symbols[objmap[lib]] = bld.env.syslib_symbols[lib] + + +def build_autodeps(bld, t): + '''build the set of dependencies for a target''' + deps = set() + name = real_name(t.sname) + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + for sym in t.undefined_symbols: + if sym in t.public_symbols: + continue + if sym in bld.env.symbol_map: + depname = bld.env.symbol_map[sym] + if depname == [ name ]: + # self dependencies aren't interesting + continue + if t.in_library == depname: + # no need to depend on the library we are part of + continue + if depname[0] in ['c', 'python']: + # these don't go into autodeps + continue + if targets[depname[0]] in [ 'SYSLIB' ]: + deps.add(depname[0]) + continue + t2 = bld.get_tgen_by_name(depname[0]) + if len(t2.in_library) != 1: + deps.add(depname[0]) + continue + if t2.in_library == t.in_library: + # if we're part of the same library, we don't need to autodep + continue + deps.add(t2.in_library[0]) + t.autodeps = deps + + +def build_library_names(bld, 
tgt_list): + '''add an in_library attribute to all targets that are part of a library''' + + if bld.env.done_build_library_names: + return + + for t in tgt_list: + t.in_library = [] + + for t in tgt_list: + if t.samba_type in ['LIBRARY', 'PLUGIN']: + for obj in t.samba_deps_extended: + t2 = bld.get_tgen_by_name(obj) + if t2 and t2.samba_type in [ 'SUBSYSTEM', 'BUILTIN', 'ASN1' ]: + if not t.sname in t2.in_library: + t2.in_library.append(t.sname) + bld.env.done_build_library_names = True + + +def check_library_deps(bld, t): + '''check that all the autodeps that have a mutual dependency with this + target are in the same library as the target''' + + name = real_name(t.sname) + + if len(t.in_library) > 1: + Logs.warn("WARNING: Target '%s' in multiple libraries: %s" % (t.sname, t.in_library)) + + for dep in t.autodeps: + t2 = bld.get_tgen_by_name(dep) + if t2 is None: + continue + for dep2 in t2.autodeps: + if dep2 == name and t.in_library != t2.in_library: + Logs.warn("WARNING: mutual dependency %s <=> %s" % (name, real_name(t2.sname))) + Logs.warn("Libraries should match. %s != %s" % (t.in_library, t2.in_library)) + # raise Errors.WafError("illegal mutual dependency") + + +def check_syslib_collisions(bld, tgt_list): + '''check if a target has any symbol collisions with a syslib + + We do not want any code in Samba to use a symbol name from a + system library. The chance of that causing problems is just too + high. Note that libreplace uses a rep_XX approach of renaming + symbols via macros + ''' + + has_error = False + for t in tgt_list: + for lib in bld.env.syslib_symbols: + common = t.public_symbols.intersection(bld.env.syslib_symbols[lib]) + if common: + Logs.error("ERROR: Target '%s' has symbols '%s' which are also in syslib '%s'" % (t.sname, common, lib)) + has_error = True + if has_error: + raise Errors.WafError("symbols in common with system libraries") + + +def check_dependencies(bld, t): + '''check for dependencies that should be changed''' + + if bld.get_tgen_by_name(t.sname + ".objlist"): + return + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + remaining = t.undefined_symbols.copy() + remaining = remaining.difference(t.public_symbols) + + sname = real_name(t.sname) + + deps = set(t.samba_deps) + for d in t.samba_deps: + if targets[d] in [ 'EMPTY', 'DISABLED', 'SYSLIB', 'GENERATOR' ]: + continue + bld.ASSERT(d in bld.env.public_symbols, "Failed to find symbol list for dependency '%s'" % d) + diff = remaining.intersection(bld.env.public_symbols[d]) + if not diff and targets[sname] != 'LIBRARY': + Logs.info("Target '%s' has no dependency on %s" % (sname, d)) + else: + remaining = remaining.difference(diff) + + t.unsatisfied_symbols = set() + needed = {} + for sym in remaining: + if sym in bld.env.symbol_map: + dep = bld.env.symbol_map[sym] + if not dep[0] in needed: + needed[dep[0]] = set() + needed[dep[0]].add(sym) + else: + t.unsatisfied_symbols.add(sym) + + for dep in needed: + Logs.info("Target '%s' should add dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep]))) + + + +def check_syslib_dependencies(bld, t): + '''check for syslib dependencies''' + + if bld.get_tgen_by_name(t.sname + ".objlist"): + return + + sname = real_name(t.sname) + + remaining = set() + + features = TO_LIST(t.features) + if 'pyembed' in features or 'pyext' in features: + if 'python' in bld.env.public_symbols: + t.unsatisfied_symbols = t.unsatisfied_symbols.difference(bld.env.public_symbols['python']) + + needed = {} + for sym in t.unsatisfied_symbols: + if sym in bld.env.symbol_map: + dep = 
bld.env.symbol_map[sym][0] + if dep == 'c': + continue + if not dep in needed: + needed[dep] = set() + needed[dep].add(sym) + else: + remaining.add(sym) + + for dep in needed: + Logs.info("Target '%s' should add syslib dep '%s' for symbols %s" % (sname, dep, " ".join(needed[dep]))) + + if remaining: + debug("deps: Target '%s' has unsatisfied symbols: %s" % (sname, " ".join(remaining))) + + + +def symbols_symbolcheck(task): + '''check the internal dependency lists''' + bld = task.env.bld + tgt_list = get_tgt_list(bld) + + build_symbol_sets(bld, tgt_list) + build_library_names(bld, tgt_list) + + for t in tgt_list: + t.autodeps = set() + if getattr(t, 'source', ''): + build_autodeps(bld, t) + + for t in tgt_list: + check_dependencies(bld, t) + + for t in tgt_list: + check_library_deps(bld, t) + +def symbols_syslibcheck(task): + '''check the syslib dependencies''' + bld = task.env.bld + tgt_list = get_tgt_list(bld) + + build_syslib_sets(bld, tgt_list) + check_syslib_collisions(bld, tgt_list) + + for t in tgt_list: + check_syslib_dependencies(bld, t) + + +def symbols_whyneeded(task): + """check why 'target' needs to link to 'subsystem'""" + bld = task.env.bld + tgt_list = get_tgt_list(bld) + + why = Options.options.WHYNEEDED.split(":") + if len(why) != 2: + raise Errors.WafError("usage: WHYNEEDED=TARGET:DEPENDENCY") + target = why[0] + subsystem = why[1] + + build_symbol_sets(bld, tgt_list) + build_library_names(bld, tgt_list) + build_syslib_sets(bld, tgt_list) + + Logs.info("Checking why %s needs to link to %s" % (target, subsystem)) + if not target in bld.env.used_symbols: + Logs.warn("unable to find target '%s' in used_symbols dict" % target) + return + if not subsystem in bld.env.public_symbols: + Logs.warn("unable to find subsystem '%s' in public_symbols dict" % subsystem) + return + overlap = bld.env.used_symbols[target].intersection(bld.env.public_symbols[subsystem]) + if not overlap: + Logs.info("target '%s' doesn't use any public symbols from '%s'" % (target, subsystem)) + else: + Logs.info("target '%s' uses symbols %s from '%s'" % (target, overlap, subsystem)) + + +def report_duplicate(bld, binname, sym, libs, fail_on_error): + '''report duplicated symbols''' + if sym in ['_init', '_fini', '_edata', '_end', '__bss_start']: + return + libnames = [] + for lib in libs: + if lib in bld.env.library_dict: + libnames.append(bld.env.library_dict[lib]) + else: + libnames.append(lib) + if fail_on_error: + raise Errors.WafError("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) + else: + print("%s: Symbol %s linked in multiple libraries %s" % (binname, sym, libnames)) + + +def symbols_dupcheck_binary(bld, binname, fail_on_error): + '''check for duplicated symbols in one binary''' + + libs = get_libs_recursive(bld, binname, set()) + symlist = symbols_extract(bld, libs, dynamic=True) + + symmap = {} + for libpath in symlist: + for sym in symlist[libpath]['PUBLIC']: + if sym == '_GLOBAL_OFFSET_TABLE_': + continue + if not sym in symmap: + symmap[sym] = set() + symmap[sym].add(libpath) + for sym in symmap: + if len(symmap[sym]) > 1: + for libpath in symmap[sym]: + if libpath in bld.env.library_dict: + report_duplicate(bld, binname, sym, symmap[sym], fail_on_error) + break + +def symbols_dupcheck(task, fail_on_error=False): + '''check for symbols defined in two different subsystems''' + bld = task.env.bld + tgt_list = get_tgt_list(bld) + + targets = LOCAL_CACHE(bld, 'TARGET_TYPE') + + build_library_dict(bld, tgt_list) + for t in tgt_list: + if t.samba_type == 'BINARY': + 
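+ # For each BINARY target, resolve the on-disk path of the linked + # binary, recursively collect the libraries it links against, and + # flag any public symbol exported by more than one of them (when at + # least one of the offending libraries was built in this tree).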
binname = os.path.relpath(t.link_task.outputs[0].abspath(bld.env), os.getcwd()) + symbols_dupcheck_binary(bld, binname, fail_on_error) + + +def symbols_dupcheck_fatal(task): + '''check for symbols defined in two different subsystems (and fail if duplicates are found)''' + symbols_dupcheck(task, fail_on_error=True) + + +def SYMBOL_CHECK(bld): + '''check our dependency lists''' + if Options.options.SYMBOLCHECK: + bld.SET_BUILD_GROUP('symbolcheck') + task = bld(rule=symbols_symbolcheck, always=True, name='symbol checking') + task.env.bld = bld + + bld.SET_BUILD_GROUP('syslibcheck') + task = bld(rule=symbols_syslibcheck, always=True, name='syslib checking') + task.env.bld = bld + + bld.SET_BUILD_GROUP('syslibcheck') + task = bld(rule=symbols_dupcheck, always=True, name='symbol duplicate checking') + task.env.bld = bld + + if Options.options.WHYNEEDED: + bld.SET_BUILD_GROUP('syslibcheck') + task = bld(rule=symbols_whyneeded, always=True, name='check why a dependency is needed') + task.env.bld = bld + + +Build.BuildContext.SYMBOL_CHECK = SYMBOL_CHECK + +def DUP_SYMBOL_CHECK(bld): + if Options.options.DUP_SYMBOLCHECK and bld.env.DEVELOPER: + '''check for duplicate symbols''' + bld.SET_BUILD_GROUP('syslibcheck') + task = bld(rule=symbols_dupcheck_fatal, always=True, name='symbol duplicate checking') + task.env.bld = bld + +Build.BuildContext.DUP_SYMBOL_CHECK = DUP_SYMBOL_CHECK diff --git a/buildtools/wafsamba/test_duplicate_symbol.sh b/buildtools/wafsamba/test_duplicate_symbol.sh new file mode 100755 index 0000000..dffac75 --- /dev/null +++ b/buildtools/wafsamba/test_duplicate_symbol.sh @@ -0,0 +1,15 @@ +#!/bin/sh +# Run the waf duplicate symbol check, wrapped in subunit. + +. testprogs/blackbox/subunit.sh + +subunit_start_test duplicate_symbols + +PYTHONHASHSEED=1 +export PYTHONHASHSEED + +if $PYTHON ./buildtools/bin/waf build --dup-symbol-check; then + subunit_pass_test duplicate_symbols +else + echo | subunit_fail_test duplicate_symbols +fi diff --git a/buildtools/wafsamba/tests/__init__.py b/buildtools/wafsamba/tests/__init__.py new file mode 100644 index 0000000..ae27418 --- /dev/null +++ b/buildtools/wafsamba/tests/__init__.py @@ -0,0 +1,35 @@ +# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org> + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+ +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +"""Tests for wafsamba.""" + +from unittest import ( + TestCase, + TestLoader, + ) + +def test_suite(): + names = [ + 'abi', + 'bundled', + 'utils', + ] + module_names = ['wafsamba.tests.test_' + name for name in names] + loader = TestLoader() + result = loader.suiteClass() + suite = loader.loadTestsFromNames(module_names) + result.addTests(suite) + return result diff --git a/buildtools/wafsamba/tests/test_abi.py b/buildtools/wafsamba/tests/test_abi.py new file mode 100644 index 0000000..ffd5a56 --- /dev/null +++ b/buildtools/wafsamba/tests/test_abi.py @@ -0,0 +1,134 @@ +# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org> + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. + +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +from wafsamba.tests import TestCase + +from wafsamba.samba_abi import ( + abi_write_vscript, + normalise_signature, + ) + +from io import StringIO + + +class NormaliseSignatureTests(TestCase): + + def test_function_simple(self): + self.assertEqual("int (const struct GUID *, const struct GUID *)", + normalise_signature("$2 = {int (const struct GUID *, const struct GUID *)} 0xe871 <GUID_compare>")) + + def test_maps_Bool(self): + # Some types have different internal names + self.assertEqual("bool (const struct GUID *)", + normalise_signature("$1 = {_Bool (const struct GUID *)} 0xe75b <GUID_all_zero>")) + + def test_function_keep(self): + self.assertEqual( + "enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)", + normalise_signature("enum ndr_err_code (struct ndr_push *, int, const union winreg_Data *)")) + + def test_struct_constant(self): + self.assertEqual( + 'uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0', + normalise_signature('$239 = {uuid = {time_low = 0, time_mid = 0, time_hi_and_version = 0, clock_seq = "\\000", node = "\\000\\000\\000\\000\\000"}, if_version = 0}')) + + def test_incomplete_sequence(self): + # Newer versions of gdb insert these incomplete sequence elements + self.assertEqual( + 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', + normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237", <incomplete sequence \\350>, node = "\\b\\000+\\020H`"}, if_version = 2}')) + self.assertEqual( + 'uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq = "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2', + normalise_signature('$244 = {uuid = {time_low = 2324192516, time_mid = 7403, time_hi_and_version = 4553, clock_seq 
= "\\237\\350", node = "\\b\\000+\\020H`"}, if_version = 2}')) + + +class WriteVscriptTests(TestCase): + + def test_one(self): + f = StringIO() + abi_write_vscript(f, "MYLIB", "1.0", [], { + "old": "1.0", + "new": "1.0"}, ["*"]) + self.assertEqual(f.getvalue(), """\ +1.0 { +\tglobal: +\t\t*; +\tlocal: +\t\t_end; +\t\t__bss_start; +\t\t_edata; +}; +""") + + def test_simple(self): + # No restrictions. + f = StringIO() + abi_write_vscript(f, "MYLIB", "1.0", ["0.1"], { + "old": "0.1", + "new": "1.0"}, ["*"]) + self.assertEqual(f.getvalue(), """\ +MYLIB_0.1 { +\tglobal: +\t\told; +}; + +1.0 { +\tglobal: +\t\t*; +\tlocal: +\t\t_end; +\t\t__bss_start; +\t\t_edata; +}; +""") + + def test_exclude(self): + f = StringIO() + abi_write_vscript(f, "MYLIB", "1.0", [], { + "exc_old": "0.1", + "old": "0.1", + "new": "1.0"}, ["!exc_*"]) + self.assertEqual(f.getvalue(), """\ +1.0 { +\tglobal: +\t\t*; +\tlocal: +\t\texc_*; +\t\t_end; +\t\t__bss_start; +\t\t_edata; +}; +""") + + def test_excludes_and_includes(self): + f = StringIO() + abi_write_vscript(f, "MYLIB", "1.0", [], { + "pub_foo": "1.0", + "exc_bar": "1.0", + "other": "1.0" + }, ["pub_*", "!exc_*"]) + self.assertEqual(f.getvalue(), """\ +1.0 { +\tglobal: +\t\tpub_*; +\tlocal: +\t\texc_*; +\t\t_end; +\t\t__bss_start; +\t\t_edata; +\t\t*; +}; +""") diff --git a/buildtools/wafsamba/tests/test_bundled.py b/buildtools/wafsamba/tests/test_bundled.py new file mode 100644 index 0000000..a8e9821 --- /dev/null +++ b/buildtools/wafsamba/tests/test_bundled.py @@ -0,0 +1,27 @@ +# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org> + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. + +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +from wafsamba.tests import TestCase + +from wafsamba.samba_bundled import ( + tuplize_version, + ) + + +class TuplizeVersionTests(TestCase): + + def test_simple(self): + self.assertEqual((1, 2, 10), tuplize_version("1.2.10")) diff --git a/buildtools/wafsamba/tests/test_utils.py b/buildtools/wafsamba/tests/test_utils.py new file mode 100644 index 0000000..77fc55c --- /dev/null +++ b/buildtools/wafsamba/tests/test_utils.py @@ -0,0 +1,76 @@ +# Copyright (C) 2012 Jelmer Vernooij <jelmer@samba.org> + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published by +# the Free Software Foundation; either version 2.1 of the License, or +# (at your option) any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
+ +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +from wafsamba.tests import TestCase + +from wafsamba.samba_utils import ( + TO_LIST, + dict_concat, + subst_vars_error, + unique_list, + ) + +class ToListTests(TestCase): + + def test_none(self): + self.assertEqual([], TO_LIST(None)) + + def test_already_list(self): + self.assertEqual(["foo", "bar", 1], TO_LIST(["foo", "bar", 1])) + + def test_default_delimiter(self): + self.assertEqual(["foo", "bar"], TO_LIST("foo bar")) + self.assertEqual(["foo", "bar"], TO_LIST(" foo bar ")) + self.assertEqual(["foo ", "bar"], TO_LIST(" \"foo \" bar ")) + + def test_delimiter(self): + self.assertEqual(["foo", "bar"], TO_LIST("foo,bar", ",")) + self.assertEqual([" foo", "bar "], TO_LIST(" foo,bar ", ",")) + self.assertEqual([" \" foo\"", " bar "], TO_LIST(" \" foo\", bar ", ",")) + + +class UniqueListTests(TestCase): + + def test_unique_list(self): + self.assertEqual(["foo", "bar"], unique_list(["foo", "bar", "foo"])) + + +class SubstVarsErrorTests(TestCase): + + def test_valid(self): + self.assertEqual("", subst_vars_error("", {})) + self.assertEqual("FOO bar", subst_vars_error("${F} bar", {"F": "FOO"})) + + def test_invalid(self): + self.assertRaises(KeyError, subst_vars_error, "${F}", {}) + + +class DictConcatTests(TestCase): + + def test_empty(self): + ret = {} + dict_concat(ret, {}) + self.assertEqual({}, ret) + + def test_same(self): + ret = {"foo": "bar"} + dict_concat(ret, {"foo": "bla"}) + self.assertEqual({"foo": "bar"}, ret) + + def test_simple(self): + ret = {"foo": "bar"} + dict_concat(ret, {"blie": "bla"}) + self.assertEqual({"foo": "bar", "blie": "bla"}, ret) diff --git a/buildtools/wafsamba/wafsamba.py b/buildtools/wafsamba/wafsamba.py new file mode 100644 index 0000000..e1c2877 --- /dev/null +++ b/buildtools/wafsamba/wafsamba.py @@ -0,0 +1,1227 @@ +# a waf tool to add autoconf-like macros to the configure section +# and for SAMBA_ macros for building libraries, binaries etc + +import os, sys, re, shutil, fnmatch +from waflib import Build, Options, Task, Utils, TaskGen, Logs, Context, Errors +from waflib.Configure import conf +from waflib.Logs import debug +from samba_utils import SUBST_VARS_RECURSIVE +TaskGen.task_gen.apply_verif = Utils.nada + +# bring in the other samba modules +from samba_utils import * +from samba_utils import symlink +from samba_version import * +from samba_autoconf import * +from samba_patterns import * +from samba_pidl import * +from samba_autoproto import * +from samba_python import * +from samba_perl import * +from samba_deps import * +from samba_bundled import * +from samba_third_party import * +import samba_cross +import samba_install +import samba_conftests +import samba_abi +import samba_headers +import generic_cc +import samba_dist +import samba_wildcard +import symbols +import pkgconfig +import configure_file +import samba_waf18 + +LIB_PATH="shared" + +os.environ['PYTHONUNBUFFERED'] = '1' + +if Context.HEXVERSION not in (0x2001a00,): + Logs.error(''' +Please use the version of waf that comes with Samba, not +a system installed version. See http://wiki.samba.org/index.php/Waf +for details. + +Alternatively, please run ./configure and make as usual. 
That will +call the right version of waf.''') + sys.exit(1) + +@conf +def SAMBA_BUILD_ENV(conf): + '''create the samba build environment''' + conf.env.BUILD_DIRECTORY = conf.bldnode.abspath() + mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, LIB_PATH)) + mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, LIB_PATH, "private")) + mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, "modules")) + mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, "plugins")) + mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, 'python/samba/dcerpc')) + # this allows all of the bin/shared and bin/python targets + # to be expressed in terms of build directory paths + mkdir_p(os.path.join(conf.env.BUILD_DIRECTORY, 'default')) + for (source, target) in [('shared', 'shared'), ('modules', 'modules'), ('plugins', 'plugins'), ('python', 'python')]: + link_target = os.path.join(conf.env.BUILD_DIRECTORY, 'default/' + target) + if not os.path.lexists(link_target): + symlink('../' + source, link_target) + + # get perl to put the blib files in the build directory + blib_bld = os.path.join(conf.env.BUILD_DIRECTORY, 'default/pidl/blib') + blib_src = os.path.join(conf.srcnode.abspath(), 'pidl/blib') + mkdir_p(blib_bld + '/man1') + mkdir_p(blib_bld + '/man3') + if os.path.islink(blib_src): + os.unlink(blib_src) + elif os.path.exists(blib_src): + shutil.rmtree(blib_src) + + +def ADD_INIT_FUNCTION(bld, subsystem, target, init_function): + '''add an init_function to the list for a subsystem''' + if init_function is None: + return + bld.ASSERT(subsystem is not None, "You must specify a subsystem for init_function '%s'" % init_function) + cache = LOCAL_CACHE(bld, 'INIT_FUNCTIONS') + if subsystem not in cache: + cache[subsystem] = [] + cache[subsystem].append( { 'TARGET':target, 'INIT_FUNCTION':init_function } ) +Build.BuildContext.ADD_INIT_FUNCTION = ADD_INIT_FUNCTION + + +def generate_empty_file(task): + task.outputs[0].write('') + return 0 + +################################################################# +def SAMBA_LIBRARY(bld, libname, source, + deps='', + public_deps='', + includes='', + public_headers=None, + public_headers_install=True, + private_headers=None, + header_path=None, + pc_files=None, + vnum=None, + soname=None, + cflags='', + cflags_end=None, + ldflags='', + external_library=False, + realname=None, + autoproto=None, + autoproto_extra_source='', + group='main', + depends_on='', + local_include=True, + global_include=True, + vars=None, + subdir=None, + install_path=None, + install=True, + pyembed=False, + pyext=False, + target_type='LIBRARY', + bundled_name=None, + link_name=None, + abi_directory=None, + abi_match=None, + orig_vscript_map=None, + hide_symbols=False, + manpages=None, + private_library=False, + grouping_library=False, + require_builtin_deps=False, + provide_builtin_linking=False, + builtin_cflags='', + force_unversioned=False, + allow_undefined_symbols=False, + allow_warnings=False, + enabled=True): + '''define a Samba library''' + + # We support: + # - LIBRARY: this can be used to link via -llibname + # - MODULE: this is module from SAMBA_MODULE() + # - PLUGIN: this is plugin for external consumers to be + # loaded via dlopen() + # - PYTHON: a python C binding library + # + if target_type not in ['LIBRARY', 'MODULE', 'PLUGIN', 'PYTHON']: + raise Errors.WafError("target_type[%s] not supported in SAMBA_LIBRARY('%s')" % + (target_type, libname)) + + if require_builtin_deps: + # For now we only support require_builtin_deps only for libraries, plugins + if target_type not in ['LIBRARY', 'PLUGIN']: + raise 
Errors.WafError("target_type[%s] not supported SAMBA_LIBRARY('%s', require_builtin_deps=True)" % + (target_type, libname)) + + if private_library and public_headers: + raise Errors.WafError("private library '%s' must not have public header files" % + libname) + + if orig_vscript_map and not private_library: + raise Errors.WafError("public library '%s' must not have orig_vscript_map" % + libname) + + if orig_vscript_map and abi_directory: + raise Errors.WafError("private library '%s' with orig_vscript_map must not have abi_directory" % + libname) + if orig_vscript_map and abi_match: + raise Errors.WafError("private library '%s' with orig_vscript_map must not have abi_match" % + libname) + + if force_unversioned and private_library: + raise Errors.WafError("private library '%s': can't have force_unversioned=True" % + libname) + + if force_unversioned and realname is None: + raise Errors.WafError("library '%s': force_unversioned=True needs realname too" % + libname) + + if LIB_MUST_BE_PRIVATE(bld, libname) and target_type not in ['PLUGIN']: + private_library = True + + if force_unversioned: + private_library = False + + if not enabled: + SET_TARGET_TYPE(bld, libname, 'DISABLED') + return + + source = bld.EXPAND_VARIABLES(source, vars=vars) + if subdir: + source = bld.SUBDIR(subdir, source) + + # remember empty libraries, so we can strip the dependencies + if ((source == '') or (source == [])): + if deps == '' and public_deps == '': + SET_TARGET_TYPE(bld, libname, 'EMPTY') + return + empty_c = libname + '.empty.c' + bld.SAMBA_GENERATOR('%s_empty_c' % libname, + rule=generate_empty_file, + target=empty_c) + source=empty_c + + samba_deps = deps + ' ' + public_deps + samba_deps = TO_LIST(samba_deps) + + if BUILTIN_LIBRARY(bld, libname): + builtin_target = libname + '.builtin.objlist' + builtin_cflags_end = '-D_PUBLIC_=_PRIVATE_' + empty_target = libname + obj_target = None + else: + if provide_builtin_linking: + builtin_target = libname + '.builtin.objlist' + builtin_cflags_end = '-D_PUBLIC_=_PRIVATE_' + else: + builtin_target = None + empty_target = None + obj_target = libname + '.objlist' + if require_builtin_deps: + # hide the builtin deps from the callers + samba_deps = TO_LIST('') + dep_target = obj_target + + if group == 'libraries': + subsystem_group = 'main' + else: + subsystem_group = group + + # first create a target for building the object files for this library + # by separating in this way, we avoid recompiling the C files + # separately for the install library and the build library + if builtin_target: + __t = __SAMBA_SUBSYSTEM_BUILTIN(bld, builtin_target, source, + deps=deps, + public_deps=public_deps, + includes=includes, + header_path=header_path, + builtin_cflags=builtin_cflags, + builtin_cflags_end=builtin_cflags_end, + group=group, + depends_on=depends_on, + local_include=local_include, + global_include=global_include, + allow_warnings=allow_warnings) + builtin_subsystem = __t + else: + builtin_subsystem = None + if obj_target: + bld.SAMBA_SUBSYSTEM(obj_target, + source = source, + deps = deps, + public_deps = public_deps, + includes = includes, + public_headers = public_headers, + public_headers_install = public_headers_install, + private_headers= private_headers, + header_path = header_path, + cflags = cflags, + cflags_end = cflags_end, + group = subsystem_group, + autoproto = autoproto, + autoproto_extra_source=autoproto_extra_source, + depends_on = depends_on, + hide_symbols = hide_symbols, + allow_warnings = allow_warnings, + pyembed = pyembed, + pyext = pyext, + 
local_include = local_include, + __require_builtin_deps=require_builtin_deps, + global_include = global_include) + else: + et = bld.SAMBA_SUBSYSTEM(empty_target, + source=[], + __force_empty=True, + __require_builtin_deps=True) + et.samba_builtin_subsystem = builtin_subsystem + + if BUILTIN_LIBRARY(bld, libname): + return + + if not SET_TARGET_TYPE(bld, libname, target_type): + return + + # the library itself will depend on that object target + samba_deps.append(dep_target) + + realname = bld.map_shlib_extension(realname, python=(target_type=='PYTHON')) + link_name = bld.map_shlib_extension(link_name, python=(target_type=='PYTHON')) + + # we don't want any public libraries without version numbers + if (not private_library and target_type != 'PYTHON' and not realname): + if vnum is None and soname is None: + raise Errors.WafError("public library '%s' must have a vnum" % + libname) + if pc_files is None: + raise Errors.WafError("public library '%s' must have pkg-config file" % + libname) + if public_headers is None: + raise Errors.WafError("public library '%s' must have header files" % + libname) + + abi_vnum = vnum + + if bundled_name is not None: + pass + elif target_type == 'PYTHON' or realname or not private_library: + bundled_name = libname.replace('_', '-') + else: + assert (private_library is True and realname is None) + bundled_name = PRIVATE_NAME(bld, libname.replace('_', '-')) + vnum = None + + ldflags = TO_LIST(ldflags) + if bld.env['ENABLE_RELRO'] is True: + ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now')) + + features = 'c cshlib symlink_lib install_lib' + if pyext: + features += ' pyext' + if pyembed: + features += ' pyembed' + + if abi_directory: + features += ' abi_check' + + if pyembed and bld.env['PYTHON_SO_ABI_FLAG']: + # For ABI checking, we don't care about the Python version. + # Remove the Python ABI tag (e.g. 
".cpython-35m") + abi_flag = bld.env['PYTHON_SO_ABI_FLAG'] + replacement = '' + version_libname = libname.replace(abi_flag, replacement) + else: + version_libname = libname + + vscript = None + if bld.env.HAVE_LD_VERSION_SCRIPT: + if force_unversioned: + version = None + elif private_library: + version = bld.env.PRIVATE_VERSION + elif vnum: + version = "%s_%s" % (libname, vnum) + else: + version = None + if version: + vscript = "%s.vscript" % libname + if orig_vscript_map: + bld.VSCRIPT_MAP_PRIVATE(version_libname, orig_vscript_map, version, vscript) + else: + bld.ABI_VSCRIPT(version_libname, abi_directory, version, vscript, + abi_match, private_library) + fullname = apply_pattern(bundled_name, bld.env.cshlib_PATTERN) + fullpath = bld.path.find_or_declare(fullname) + vscriptpath = bld.path.find_or_declare(vscript) + if not fullpath: + raise Errors.WafError("unable to find fullpath for %s" % fullname) + if not vscriptpath: + raise Errors.WafError("unable to find vscript path for %s" % vscript) + bld.add_manual_dependency(fullpath, vscriptpath) + if bld.is_install: + # also make the .inst file depend on the vscript + instname = apply_pattern(bundled_name + '.inst', bld.env.cshlib_PATTERN) + bld.add_manual_dependency(bld.path.find_or_declare(instname), bld.path.find_or_declare(vscript)) + vscript = os.path.join(bld.path.abspath(bld.env), vscript) + + bld.SET_BUILD_GROUP(group) + t = bld( + features = features, + source = [], + target = bundled_name, + depends_on = depends_on, + samba_ldflags = ldflags, + samba_deps = samba_deps, + samba_includes = includes, + version_script = vscript, + version_libname = version_libname, + local_include = local_include, + global_include = global_include, + vnum = vnum, + soname = soname, + install_path = None, + samba_inst_path = install_path, + name = libname, + samba_realname = realname, + samba_install = install, + abi_directory = "%s/%s" % (bld.path.abspath(), abi_directory), + abi_match = abi_match, + abi_vnum = abi_vnum, + private_library = private_library, + grouping_library=grouping_library, + allow_undefined_symbols=allow_undefined_symbols, + samba_require_builtin_deps=False, + samba_builtin_subsystem=builtin_subsystem, + ) + + if realname and not link_name: + link_name = 'shared/%s' % realname + + if link_name: + if 'waflib.extras.compat15' in sys.modules: + link_name = 'default/' + link_name + t.link_name = link_name + + if pc_files is not None and not private_library: + if pyembed: + bld.PKG_CONFIG_FILES(pc_files, vnum=vnum, extra_name=bld.env['PYTHON_SO_ABI_FLAG']) + else: + bld.PKG_CONFIG_FILES(pc_files, vnum=vnum) + + if (manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and + bld.env['XSLTPROC_MANPAGES']): + bld.MANPAGES(manpages, install) + + +Build.BuildContext.SAMBA_LIBRARY = SAMBA_LIBRARY + + +################################################################# +def SAMBA_BINARY(bld, binname, source, + deps='', + includes='', + public_headers=None, + private_headers=None, + header_path=None, + modules=None, + ldflags=None, + cflags='', + cflags_end=None, + autoproto=None, + use_hostcc=False, + use_global_deps=True, + compiler=None, + group='main', + manpages=None, + local_include=True, + global_include=True, + subsystem_name=None, + allow_warnings=False, + pyembed=False, + vars=None, + subdir=None, + install=True, + install_path=None, + enabled=True, + fuzzer=False, + for_selftest=False): + '''define a Samba binary''' + + if for_selftest: + install=False + if not bld.CONFIG_GET('ENABLE_SELFTEST'): + enabled=False + + if not enabled: + 
SET_TARGET_TYPE(bld, binname, 'DISABLED') + return + + # Fuzzing builds do not build normal binaries + # however we must build asn1compile etc + + if not use_hostcc and bld.env.enable_fuzzing != fuzzer: + SET_TARGET_TYPE(bld, binname, 'DISABLED') + return + + if fuzzer: + install = False + if ldflags is None: + ldflags = bld.env['FUZZ_TARGET_LDFLAGS'] + + if not SET_TARGET_TYPE(bld, binname, 'BINARY'): + return + + features = 'c cprogram symlink_bin install_bin' + if pyembed: + features += ' pyembed' + + obj_target = binname + '.objlist' + + source = bld.EXPAND_VARIABLES(source, vars=vars) + if subdir: + source = bld.SUBDIR(subdir, source) + source = unique_list(TO_LIST(source)) + + if group == 'binaries': + subsystem_group = 'main' + elif group == 'build_compilers': + subsystem_group = 'compiler_libraries' + else: + subsystem_group = group + + # only specify PIE flags for binaries + pie_cflags = TO_LIST(cflags) + pie_ldflags = TO_LIST(ldflags) + if bld.env['ENABLE_PIE'] is True: + pie_cflags.extend(TO_LIST('-fPIE')) + pie_ldflags.extend(TO_LIST('-pie')) + if bld.env['ENABLE_RELRO'] is True: + pie_ldflags.extend(TO_LIST('-Wl,-z,relro,-z,now')) + + # first create a target for building the object files for this binary + # by separating in this way, we avoid recompiling the C files + # separately for the install binary and the build binary + bld.SAMBA_SUBSYSTEM(obj_target, + source = source, + deps = deps, + includes = includes, + cflags = pie_cflags, + cflags_end = cflags_end, + group = subsystem_group, + autoproto = autoproto, + subsystem_name = subsystem_name, + local_include = local_include, + global_include = global_include, + use_hostcc = use_hostcc, + pyext = pyembed, + allow_warnings = allow_warnings, + use_global_deps= use_global_deps) + + bld.SET_BUILD_GROUP(group) + + # the binary itself will depend on that object target + deps = TO_LIST(deps) + deps.append(obj_target) + + t = bld( + features = features, + source = [], + target = binname, + samba_deps = deps, + samba_includes = includes, + local_include = local_include, + global_include = global_include, + samba_modules = modules, + top = True, + samba_subsystem= subsystem_name, + install_path = None, + samba_inst_path= install_path, + samba_install = install, + samba_ldflags = pie_ldflags + ) + + if manpages is not None and 'XSLTPROC_MANPAGES' in bld.env and bld.env['XSLTPROC_MANPAGES']: + bld.MANPAGES(manpages, install) + +Build.BuildContext.SAMBA_BINARY = SAMBA_BINARY + + +################################################################# +def SAMBA_MODULE(bld, modname, source, + deps='', + includes='', + subsystem=None, + init_function=None, + module_init_name='samba_init_module', + autoproto=None, + autoproto_extra_source='', + cflags='', + cflags_end=None, + internal_module=True, + local_include=True, + global_include=True, + vars=None, + subdir=None, + enabled=True, + pyembed=False, + manpages=None, + allow_undefined_symbols=False, + allow_warnings=False, + install=True + ): + '''define a Samba module.''' + + bld.ASSERT(subsystem, "You must specify a subsystem for SAMBA_MODULE(%s)" % modname) + + source = bld.EXPAND_VARIABLES(source, vars=vars) + if subdir: + source = bld.SUBDIR(subdir, source) + + if internal_module or BUILTIN_LIBRARY(bld, modname): + # Do not create modules for disabled subsystems + if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': + return + bld.SAMBA_SUBSYSTEM(modname, source, + deps=deps, + includes=includes, + autoproto=autoproto, + autoproto_extra_source=autoproto_extra_source, + cflags=cflags, + 
cflags_end=cflags_end, + local_include=local_include, + global_include=global_include, + allow_warnings=allow_warnings, + enabled=enabled) + + bld.ADD_INIT_FUNCTION(subsystem, modname, init_function) + return + + if not enabled: + SET_TARGET_TYPE(bld, modname, 'DISABLED') + return + + # Do not create modules for disabled subsystems + if GET_TARGET_TYPE(bld, subsystem) == 'DISABLED': + return + + realname = modname + deps += ' ' + subsystem + while realname.startswith("lib"+subsystem+"_"): + realname = realname[len("lib"+subsystem+"_"):] + while realname.startswith(subsystem+"_"): + realname = realname[len(subsystem+"_"):] + + build_name = "%s_module_%s" % (subsystem, realname) + + realname = bld.make_libname(realname) + while realname.startswith("lib"): + realname = realname[len("lib"):] + + build_link_name = "modules/%s/%s" % (subsystem, realname) + + if init_function: + cflags += " -D%s=%s" % (init_function, module_init_name) + + bld.SAMBA_LIBRARY(modname, + source, + deps=deps, + includes=includes, + cflags=cflags, + cflags_end=cflags_end, + realname = realname, + autoproto = autoproto, + local_include=local_include, + global_include=global_include, + vars=vars, + bundled_name=build_name, + link_name=build_link_name, + install_path="${MODULESDIR}/%s" % subsystem, + pyembed=pyembed, + manpages=manpages, + allow_undefined_symbols=allow_undefined_symbols, + allow_warnings=allow_warnings, + private_library=True, + install=install + ) + + +Build.BuildContext.SAMBA_MODULE = SAMBA_MODULE + +################################################################# +def SAMBA_PLUGIN(bld, pluginname, source, + deps='', + includes='', + vnum=None, + soname=None, + cflags='', + ldflags='', + local_include=True, + global_include=True, + vars=None, + subdir=None, + realname=None, + autoproto=None, + autoproto_extra_source='', + install_path=None, + install=True, + manpages=None, + require_builtin_deps=True, + allow_undefined_symbols=False, + enabled=True): + '''define an external plugin.''' + + bld.ASSERT(realname, "You must specify a realname for SAMBA_PLUGIN(%s)" % pluginname) + + source = bld.EXPAND_VARIABLES(source, vars=vars) + if subdir: + source = bld.SUBDIR(subdir, source) + + build_name = "_plugin_%s" % (pluginname) + build_link_name = "plugins/%s" % (realname) + + bld.SAMBA_LIBRARY(pluginname, + source, + bundled_name=build_name, + link_name=build_link_name, + target_type='PLUGIN', + deps=deps, + includes=includes, + vnum=vnum, + soname=soname, + cflags=cflags, + ldflags=ldflags, + realname=realname, + autoproto=autoproto, + autoproto_extra_source=autoproto_extra_source, + local_include=local_include, + global_include=global_include, + vars=vars, + group='main', + install_path=install_path, + install=install, + manpages=manpages, + require_builtin_deps=require_builtin_deps, + builtin_cflags=cflags, + hide_symbols=True, + public_headers=[], + public_headers_install=False, + pc_files=[], + allow_undefined_symbols=allow_undefined_symbols, + allow_warnings=False, + enabled=enabled) +Build.BuildContext.SAMBA_PLUGIN = SAMBA_PLUGIN + +def __SAMBA_SUBSYSTEM_BUILTIN(bld, builtin_target, source, + deps='', + public_deps='', + includes='', + public_headers=None, + public_headers_install=True, + private_headers=None, + header_path=None, + builtin_cflags='', + builtin_cflags_end=None, + group='main', + autoproto=None, + autoproto_extra_source='', + depends_on='', + local_include=True, + global_include=True, + allow_warnings=False): + + bld.ASSERT(builtin_target.endswith('.builtin.objlist'), + 
"builtin_target[%s] does not end with '.builtin.objlist'" % + (builtin_target)) + return bld.SAMBA_SUBSYSTEM(builtin_target, source, + deps=deps, + public_deps=public_deps, + includes=includes, + public_headers=public_headers, + public_headers_install=public_headers_install, + private_headers=private_headers, + header_path=header_path, + cflags=builtin_cflags, + cflags_end=builtin_cflags_end, + hide_symbols=True, + group=group, + target_type='BUILTIN', + autoproto=autoproto, + autoproto_extra_source=autoproto_extra_source, + depends_on=depends_on, + local_include=local_include, + global_include=global_include, + allow_warnings=allow_warnings, + __require_builtin_deps=True) + +################################################################# +def SAMBA_SUBSYSTEM(bld, modname, source, + deps='', + public_deps='', + __force_empty=False, + includes='', + public_headers=None, + public_headers_install=True, + private_headers=None, + header_path=None, + cflags='', + cflags_end=None, + group='main', + target_type='SUBSYSTEM', + init_function_sentinel=None, + autoproto=None, + autoproto_extra_source='', + depends_on='', + local_include=True, + local_include_first=True, + global_include=True, + subsystem_name=None, + enabled=True, + use_hostcc=False, + use_global_deps=True, + vars=None, + subdir=None, + hide_symbols=False, + __require_builtin_deps=False, + provide_builtin_linking=False, + builtin_cflags='', + allow_warnings=False, + pyext=False, + pyembed=False): + '''define a Samba subsystem''' + + # We support: + # - SUBSYSTEM: a normal subsystem from SAMBA_SUBSYSTEM() + # - BUILTIN: a hidden subsystem from __SAMBA_SUBSYSTEM_BUILTIN() + if target_type not in ['SUBSYSTEM', 'BUILTIN']: + raise Errors.WafError("target_type[%s] not supported in SAMBA_SUBSYSTEM('%s')" % + (target_type, modname)) + + if not enabled: + SET_TARGET_TYPE(bld, modname, 'DISABLED') + return + + # remember empty subsystems, so we can strip the dependencies + if ((source == '') or (source == [])): + if not __force_empty and deps == '' and public_deps == '': + SET_TARGET_TYPE(bld, modname, 'EMPTY') + return + empty_c = modname + '.empty.c' + bld.SAMBA_GENERATOR('%s_empty_c' % modname, + rule=generate_empty_file, + target=empty_c) + source=empty_c + + if not SET_TARGET_TYPE(bld, modname, target_type): + return + + source = bld.EXPAND_VARIABLES(source, vars=vars) + if subdir: + source = bld.SUBDIR(subdir, source) + source = unique_list(TO_LIST(source)) + + deps += ' ' + public_deps + + bld.SET_BUILD_GROUP(group) + + features = 'c' + if pyext: + features += ' pyext' + if pyembed: + features += ' pyembed' + + t = bld( + features = features, + source = source, + target = modname, + samba_cflags = CURRENT_CFLAGS(bld, modname, cflags, + allow_warnings=allow_warnings, + use_hostcc=use_hostcc, + hide_symbols=hide_symbols), + depends_on = depends_on, + samba_deps = TO_LIST(deps), + samba_includes = includes, + local_include = local_include, + local_include_first = local_include_first, + global_include = global_include, + samba_subsystem= subsystem_name, + samba_use_hostcc = use_hostcc, + samba_use_global_deps = use_global_deps, + samba_require_builtin_deps = __require_builtin_deps, + samba_builtin_subsystem = None, + ) + + if cflags_end is not None: + t.samba_cflags.extend(TO_LIST(cflags_end)) + + if autoproto is not None: + bld.SAMBA_AUTOPROTO(autoproto, source + TO_LIST(autoproto_extra_source)) + if public_headers is not None: + bld.PUBLIC_HEADERS(public_headers, header_path=header_path, + public_headers_install=public_headers_install) 
+ + if provide_builtin_linking: + + if use_hostcc: + raise Errors.WafError("subsystem[%s] provide_builtin_linking=True " + + "not allowed with use_hostcc=True" % + modname) + + if pyext or pyembed: + raise Errors.WafError("subsystem[%s] provide_builtin_linking=True " + + "not allowed with pyext=True nor pyembed=True" % + modname) + + if __require_builtin_deps: + raise Errors.WafError("subsystem[%s] provide_builtin_linking=True " + + "not allowed with __require_builtin_deps=True" % + modname) + + builtin_target = modname + '.builtin.objlist' + tbuiltin = __SAMBA_SUBSYSTEM_BUILTIN(bld, builtin_target, source, + deps=deps, + public_deps=public_deps, + includes=includes, + header_path=header_path, + builtin_cflags=builtin_cflags, + builtin_cflags_end='-D_PUBLIC_=_PRIVATE_', + group=group, + depends_on=depends_on, + local_include=local_include, + global_include=global_include, + allow_warnings=allow_warnings) + t.samba_builtin_subsystem = tbuiltin + + return t + + +Build.BuildContext.SAMBA_SUBSYSTEM = SAMBA_SUBSYSTEM + + +def SAMBA_GENERATOR(bld, name, rule, source='', target='', + group='generators', enabled=True, + public_headers=None, + public_headers_install=True, + private_headers=None, + header_path=None, + vars=None, + dep_vars=None, + always=False): + '''A generic source generator target''' + + if dep_vars is None: + dep_vars = [] + if not SET_TARGET_TYPE(bld, name, 'GENERATOR'): + return + + if not enabled: + return + + dep_vars = TO_LIST(dep_vars) + dep_vars.append('ruledeps') + dep_vars.append('SAMBA_GENERATOR_VARS') + + shell=isinstance(rule, str) + + # This ensures that if the command (executed in the shell) fails + # (returns non-zero), the build fails + if shell: + rule = "set -e; " + rule + + bld.SET_BUILD_GROUP(group) + t = bld( + rule=rule, + source=bld.EXPAND_VARIABLES(source, vars=vars), + shell=shell, + target=target, + update_outputs=True, + before='c', + ext_out='.c', + samba_type='GENERATOR', + dep_vars = dep_vars, + name=name) + + if vars is None: + vars = {} + t.env.SAMBA_GENERATOR_VARS = vars + + if always: + t.always = True + + if public_headers is not None: + bld.PUBLIC_HEADERS(public_headers, header_path=header_path, + public_headers_install=public_headers_install) + return t +Build.BuildContext.SAMBA_GENERATOR = SAMBA_GENERATOR + + + +@Utils.run_once +def SETUP_BUILD_GROUPS(bld): + '''setup build groups used to ensure that the different build + phases happen consecutively''' + bld.p_ln = bld.srcnode # we do want to see all targets! 
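    # Hedged sketch of why the group list below matters (names are
    # illustrative, defined as a never-called local helper): waf processes
    # build groups strictly in order, which is what lets generated sources
    # exist before anything in 'main' starts compiling.
    def _example_group_usage(bld):
        bld.SET_BUILD_GROUP('generators')
        bld.SAMBA_GENERATOR('example_gen_tables',
                            rule='${PYTHON} ${SRC} > ${TGT}',
                            source='mktables.py',
                            target='tables.c')
        bld.SET_BUILD_GROUP('main')
        bld.SAMBA_SUBSYSTEM('EXAMPLE_TABLES', source='tables.c')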
+ bld.env['USING_BUILD_GROUPS'] = True + bld.add_group('setup') + bld.add_group('generators') + bld.add_group('hostcc_base_build_source') + bld.add_group('hostcc_base_build_main') + bld.add_group('hostcc_build_source') + bld.add_group('hostcc_build_main') + bld.add_group('vscripts') + bld.add_group('base_libraries') + bld.add_group('build_source') + bld.add_group('prototypes') + bld.add_group('headers') + bld.add_group('main') + bld.add_group('symbolcheck') + bld.add_group('syslibcheck') + bld.add_group('final') +Build.BuildContext.SETUP_BUILD_GROUPS = SETUP_BUILD_GROUPS + + +def SET_BUILD_GROUP(bld, group): + '''set the current build group''' + if not 'USING_BUILD_GROUPS' in bld.env: + return + bld.set_group(group) +Build.BuildContext.SET_BUILD_GROUP = SET_BUILD_GROUP + + + +def SAMBA_SCRIPT(bld, name, pattern, installdir, installname=None): + '''used to copy scripts from the source tree into the build directory + for use by selftest''' + + source = bld.path.ant_glob(pattern, flat=True) + + bld.SET_BUILD_GROUP('build_source') + for s in TO_LIST(source): + iname = s + if installname is not None: + iname = installname + target = os.path.join(installdir, iname) + tgtdir = os.path.dirname(os.path.join(bld.srcnode.abspath(bld.env), '..', target)) + mkdir_p(tgtdir) + link_src = os.path.normpath(os.path.join(bld.path.abspath(), s)) + link_dst = os.path.join(tgtdir, os.path.basename(iname)) + if os.path.islink(link_dst) and os.readlink(link_dst) == link_src: + continue + if os.path.islink(link_dst): + os.unlink(link_dst) + Logs.info("symlink: %s -> %s/%s" % (s, installdir, iname)) + symlink(link_src, link_dst) +Build.BuildContext.SAMBA_SCRIPT = SAMBA_SCRIPT + + +def copy_and_fix_python_path(task): + pattern='sys.path.insert(0, "bin/python")' + if task.env["PYTHONARCHDIR"] in sys.path and task.env["PYTHONDIR"] in sys.path: + replacement = "" + elif task.env["PYTHONARCHDIR"] == task.env["PYTHONDIR"]: + replacement="""sys.path.insert(0, "%s")""" % task.env["PYTHONDIR"] + else: + replacement="""sys.path.insert(0, "%s") +sys.path.insert(1, "%s")""" % (task.env["PYTHONARCHDIR"], task.env["PYTHONDIR"]) + + if task.env["PYTHON"][0].startswith("/"): + replacement_shebang = "#!%s\n" % task.env["PYTHON"][0] + else: + replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PYTHON"][0] + + installed_location=task.outputs[0].bldpath(task.env) + source_file = open(task.inputs[0].srcpath(task.env)) + installed_file = open(installed_location, 'w') + lineno = 0 + for line in source_file: + newline = line + if (lineno == 0 and + line[:2] == "#!"): + newline = replacement_shebang + elif pattern in line: + newline = line.replace(pattern, replacement) + installed_file.write(newline) + lineno = lineno + 1 + installed_file.close() + os.chmod(installed_location, 0o755) + return 0 + +def copy_and_fix_perl_path(task): + pattern='use lib "$RealBin/lib";' + + replacement = "" + if not task.env["PERL_LIB_INSTALL_DIR"] in task.env["PERL_INC"]: + replacement = 'use lib "%s";' % task.env["PERL_LIB_INSTALL_DIR"] + + if task.env["PERL"][0] == "/": + replacement_shebang = "#!%s\n" % task.env["PERL"] + else: + replacement_shebang = "#!/usr/bin/env %s\n" % task.env["PERL"] + + installed_location=task.outputs[0].bldpath(task.env) + source_file = open(task.inputs[0].srcpath(task.env)) + installed_file = open(installed_location, 'w') + lineno = 0 + for line in source_file: + newline = line + if lineno == 0 and task.env["PERL_SPECIFIED"] is True and line[:2] == "#!": + newline = replacement_shebang + elif pattern in line: + newline = 
line.replace(pattern, replacement) + installed_file.write(newline) + lineno = lineno + 1 + installed_file.close() + os.chmod(installed_location, 0o755) + return 0 + + +def install_file(bld, destdir, file, chmod=MODE_644, flat=False, + python_fixup=False, perl_fixup=False, + destname=None, base_name=None): + '''install a file''' + if not isinstance(file, str): + file = file.abspath() + destdir = bld.EXPAND_VARIABLES(destdir) + if not destname: + destname = file + if flat: + destname = os.path.basename(destname) + dest = os.path.join(destdir, destname) + if python_fixup: + # fix the path python will use to find Samba modules + inst_file = file + '.inst' + bld.SAMBA_GENERATOR('python_%s' % destname, + rule=copy_and_fix_python_path, + dep_vars=["PYTHON","PYTHON_SPECIFIED","PYTHONDIR","PYTHONARCHDIR"], + source=file, + target=inst_file) + file = inst_file + if perl_fixup: + # fix the path perl will use to find Samba modules + inst_file = file + '.inst' + bld.SAMBA_GENERATOR('perl_%s' % destname, + rule=copy_and_fix_perl_path, + dep_vars=["PERL","PERL_SPECIFIED","PERL_LIB_INSTALL_DIR"], + source=file, + target=inst_file) + file = inst_file + if base_name: + file = os.path.join(base_name, file) + bld.install_as(dest, file, chmod=chmod) + + +def INSTALL_FILES(bld, destdir, files, chmod=MODE_644, flat=False, + python_fixup=False, perl_fixup=False, + destname=None, base_name=None): + '''install a set of files''' + for f in TO_LIST(files): + install_file(bld, destdir, f, chmod=chmod, flat=flat, + python_fixup=python_fixup, perl_fixup=perl_fixup, + destname=destname, base_name=base_name) +Build.BuildContext.INSTALL_FILES = INSTALL_FILES + + +def INSTALL_WILDCARD(bld, destdir, pattern, chmod=MODE_644, flat=False, + python_fixup=False, exclude=None, trim_path=None): + '''install a set of files matching a wildcard pattern''' + files=TO_LIST(bld.path.ant_glob(pattern, flat=True)) + if trim_path: + files2 = [] + for f in files: + files2.append(os.path.relpath(f, trim_path)) + files = files2 + + if exclude: + for f in files[:]: + if fnmatch.fnmatch(f, exclude): + files.remove(f) + INSTALL_FILES(bld, destdir, files, chmod=chmod, flat=flat, + python_fixup=python_fixup, base_name=trim_path) +Build.BuildContext.INSTALL_WILDCARD = INSTALL_WILDCARD + +def INSTALL_DIR(bld, path, chmod=0o755): + """Install a directory if it doesn't exist, always set permissions.""" + + if not path: + return [] + + destpath = bld.EXPAND_VARIABLES(path) + if Options.options.destdir: + destpath = os.path.join(Options.options.destdir, destpath.lstrip(os.sep)) + + if bld.is_install > 0: + if not os.path.isdir(destpath): + try: + Logs.info('* create %s', destpath) + os.makedirs(destpath) + os.chmod(destpath, chmod) + except OSError as e: + if not os.path.isdir(destpath): + raise Errors.WafError("Cannot create the folder '%s' (error: %s)" % (path, e)) +Build.BuildContext.INSTALL_DIR = INSTALL_DIR + +def INSTALL_DIRS(bld, destdir, dirs, chmod=0o755): + '''install a set of directories''' + destdir = bld.EXPAND_VARIABLES(destdir) + dirs = bld.EXPAND_VARIABLES(dirs) + for d in TO_LIST(dirs): + INSTALL_DIR(bld, os.path.join(destdir, d), chmod) +Build.BuildContext.INSTALL_DIRS = INSTALL_DIRS + + +def MANPAGES(bld, manpages, install): + '''build and install manual pages''' + bld.env.MAN_XSL = 'http://docbook.sourceforge.net/release/xsl/current/manpages/docbook.xsl' + for m in manpages.split(): + source = m + '.xml' + bld.SAMBA_GENERATOR(m, + source=source, + target=m, + group='final', + rule='${XSLTPROC} --xinclude -o ${TGT} --nonet ${MAN_XSL} 
${SRC}' + ) + if install: + bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) +Build.BuildContext.MANPAGES = MANPAGES + +def SAMBAMANPAGES(bld, manpages, extra_source=None): + '''build and install manual pages''' + bld.env.SAMBA_EXPAND_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/expand-sambadoc.xsl' + bld.env.SAMBA_MAN_XSL = bld.srcnode.abspath() + '/docs-xml/xslt/man.xsl' + bld.env.SAMBA_CATALOG = bld.bldnode.abspath() + '/docs-xml/build/catalog.xml' + bld.env.SAMBA_CATALOGS = os.getenv('XML_CATALOG_FILES', 'file:///etc/xml/catalog file:///usr/local/share/xml/catalog') + ' file://' + bld.env.SAMBA_CATALOG + + for m in manpages.split(): + source = [m + '.xml'] + if extra_source is not None: + source = [source, extra_source] + # ${SRC[1]}, ${SRC[2]} and ${SRC[3]} are not referenced in the + # SAMBA_GENERATOR but trigger the dependency calculation so + # ensures that manpages are rebuilt when these change. + source += ['build/DTD/samba.build.pathconfig', 'build/DTD/samba.entities', 'build/DTD/samba.build.version'] + bld.SAMBA_GENERATOR(m, + source=source, + target=m, + group='final', + dep_vars=['SAMBA_MAN_XSL', 'SAMBA_EXPAND_XSL', 'SAMBA_CATALOG'], + rule='''XML_CATALOG_FILES="${SAMBA_CATALOGS}" + export XML_CATALOG_FILES + ${XSLTPROC} --xinclude --stringparam noreference 0 -o ${TGT}.xml --nonet ${SAMBA_EXPAND_XSL} ${SRC[0].abspath(env)} + ${XSLTPROC} --nonet -o ${TGT} ${SAMBA_MAN_XSL} ${TGT}.xml''' + ) + bld.INSTALL_FILES('${MANDIR}/man%s' % m[-1], m, flat=True) +Build.BuildContext.SAMBAMANPAGES = SAMBAMANPAGES + +@after('apply_link') +@feature('cshlib') +def apply_bundle_remove_dynamiclib_patch(self): + if self.env['MACBUNDLE'] or getattr(self,'mac_bundle',False): + if not getattr(self,'vnum',None): + try: + self.env['LINKFLAGS'].remove('-dynamiclib') + self.env['LINKFLAGS'].remove('-single_module') + except ValueError: + pass diff --git a/buildtools/wafsamba/wscript b/buildtools/wafsamba/wscript new file mode 100644 index 0000000..99a6237 --- /dev/null +++ b/buildtools/wafsamba/wscript @@ -0,0 +1,737 @@ +#!/usr/bin/env python + +# this is a base set of waf rules that everything else pulls in first + +import os, sys +from waflib import Configure, Logs, Options, Utils, Context, Errors +import wafsamba +from samba_utils import symlink +from optparse import SUPPRESS_HELP + +phs = os.environ.get("PYTHONHASHSEED", None) +if phs != "1": + raise Errors.WafError('''PYTHONHASHSEED=1 missing! Don't use waf directly, use ./configure and make!''') + +# this forces configure to be re-run if any of the configure +# sections of the build scripts change. We have to check +# for this in sys.argv as options have not yet been parsed when +# we need to set this. This is off by default until some issues +# are resolved related to WAFCACHE. It will need a lot of testing +# before it is enabled by default. +if '--enable-auto-reconfigure' in sys.argv: + Configure.autoconfig = 'clobber' + +def default_value(option, default=''): + if option in Options.options.__dict__: + return Options.options.__dict__[option] + return default + +def options(opt): + opt.load('compiler_cc') + + opt.load('gnu_dirs') + + gr = opt.option_group('library handling options') + + gr.add_option('--bundled-libraries', + help=(f'''comma separated list of bundled libraries. + +{Context.g_module.APPNAME} includes copies of externally maintained +system libraries (such as popt, cmocka) as well as Samba-maintained +libraries that can be found on the system already (such as talloc, +tdb). 
+
+This option, most useful for packagers, controls whether each library
+should be forced to be obtained from inside Samba (bundled), forced to
+be obtained from the system (bundling disabled, ensuring that
+dependency errors are not silently missed) or if that choice should be
+automatic (best for end users).
+
+May include !LIBNAME to disable bundling a library.
+
+Can be 'NONE' or 'ALL' [auto]'''),
+                  action="store", dest='BUNDLED_LIBS', default='')
+
+    gr.add_option('--private-libraries',
+                  help=(f'''comma separated list of normally public libraries to build instead as private libraries.
+
+By default {Context.g_module.APPNAME} will publish a number of public
+libraries for use by other software. For Samba this would include
+libwbclient, libsmbclient and others.
+
+This option allows that to be disabled, ensuring that other software
+does not use these libraries and that they are placed in a private
+filesystem prefix.
+
+May include !LIBNAME to disable making a library private in order to
+limit the effect of 'ALL'.'''),
+                  action="store", dest='PRIVATE_LIBS', default='')
+
+    extension_default = default_value('PRIVATE_EXTENSION_DEFAULT')
+    gr.add_option('--private-library-extension',
+                  help=("name extension for private libraries [%s]" % extension_default),
+                  action="store", dest='PRIVATE_EXTENSION', default=extension_default)
+
+    extension_exception = default_value('PRIVATE_EXTENSION_EXCEPTION')
+    gr.add_option('--private-extension-exception',
+                  help=("comma separated list of libraries to not apply extension to [%s]" % extension_exception),
+                  action="store", dest='PRIVATE_EXTENSION_EXCEPTION', default=extension_exception)
+
+    builtin_default = default_value('BUILTIN_LIBRARIES_DEFAULT')
+    gr.add_option('--builtin-libraries', help=(
+f'''comma separated list of libraries to build directly into binaries.
+
+By default {Context.g_module.APPNAME} will build a large number of
+shared libraries, to reduce binary size. This option overrides that
+behaviour and essentially statically links the specified libraries into
+each binary [{builtin_default}]'''),
+                  action="store",
+                  dest='BUILTIN_LIBRARIES', default=builtin_default)
+
+    gr.add_option('--minimum-library-version',
+                  help=(
+f'''list of minimum system library versions for otherwise bundled
+libraries.
+
+{Context.g_module.APPNAME} by default requires, in order to match what
+is tested in our continuous integration (CI) test-suite, that the
+versions of the libraries we include match those found on the system
+before we will select not to 'bundle'.
+
+This option, possibly useful for packagers, allows that specified
+version to be overridden (say, if it is absolutely known that the
+newer version included in this tarball has no relevant changes).
+
+Use this with extreme care.
+
+(LIBNAME1:version,LIBNAME2:version)'''),
+                  action="store", dest='MINIMUM_LIBRARY_VERSION', default='')
+
+    gr.add_option('--disable-rpath',
+                  help=("Disable use of rpath for build binaries"),
+                  action="store_true", dest='disable_rpath_build', default=False)
+    gr.add_option('--disable-rpath-install',
+                  help=("Disable use of rpath for library path in installed files"),
+                  action="store_true", dest='disable_rpath_install', default=False)
+    gr.add_option('--disable-rpath-private-install',
+                  help=("Disable use of rpath for private library path in installed files"),
+                  action="store_true", dest='disable_rpath_private_install', default=False)
+    gr.add_option('--nonshared-binary',
+                  help=(
+f'''Disable use of shared libraries internal to {Context.g_module.APPNAME} for the listed binaries.
+ +The resulting binaries are 'statically linked' with regard to components provided by +{Context.g_module.APPNAME}, but remain dynamically linked to (eg) libc.so and libgnutls.so + +Currently the only tested value is 'smbtorture,smbd/smbd' for Samba'''), + action="store", dest='NONSHARED_BINARIES', default='') + gr.add_option('--disable-symbol-versions', + help=("Disable use of the --version-script linker option"), + action="store_true", dest='disable_symbol_versions', default=False) + + opt.add_option('--with-modulesdir', + help=("modules directory [PREFIX/modules]"), + action="store", dest='MODULESDIR', default='${PREFIX}/modules') + + opt.add_option('--with-privatelibdir', + help=("private library directory [PREFIX/lib/%s]" % Context.g_module.APPNAME), + action="store", dest='PRIVATELIBDIR', default=None) + + opt.add_option('--with-libiconv', + help='additional directory to search for libiconv', + action='store', dest='iconv_open', default='/usr/local', + match = ['Checking for library iconv', 'Checking for iconv_open', 'Checking for header iconv.h']) + opt.add_option('--without-gettext', + help=("Disable use of gettext"), + action="store_true", dest='disable_gettext', default=False) + + gr = opt.option_group('developer options') + + gr.add_option('-C', + help='enable configure caching', + action='store_true', dest='enable_configure_cache') + gr.add_option('--enable-auto-reconfigure', + help='enable automatic reconfigure on build', + action='store_true', dest='enable_auto_reconfigure') + gr.add_option('--enable-debug', + help=("Turn on debugging symbols"), + action="store_true", dest='debug', default=False) + gr.add_option('--enable-developer', + help=("Turn on developer warnings and debugging"), + action="store_true", dest='developer', default=False) + gr.add_option('--pidl-developer', + help=("annotate PIDL-generated code for developers"), + action="store_true", dest='pidl_developer', default=False) + gr.add_option('--disable-warnings-as-errors', + help=("Do not treat all warnings as errors (disable -Werror)"), + action="store_true", dest='disable_warnings_as_errors', default=False) + opt.add_option('--enable-coverage', + help=("enable options necessary for code coverage " + "reporting on selftest (default=no)"), + action="store_true", dest='enable_coverage', default=False) + gr.add_option('--fatal-errors', + help=("Stop compilation on first error (enable -Wfatal-errors)"), + action="store_true", dest='fatal_errors', default=False) + gr.add_option('--enable-gccdeps', + help=("Enable use of gcc -MD dependency module"), + action="store_true", dest='enable_gccdeps', default=True) + gr.add_option('--pedantic', + help=("Enable even more compiler warnings"), + action='store_true', dest='pedantic', default=False) + gr.add_option('--git-local-changes', + help=("mark version with + if local git changes"), + action='store_true', dest='GIT_LOCAL_CHANGES', default=False) + gr.add_option('--address-sanitizer', + help=("Enable address sanitizer compile and linker flags"), + action="store_true", dest='address_sanitizer', default=False) + gr.add_option('--undefined-sanitizer', + help=("Enable undefined behaviour sanitizer compile and linker flags"), + action="store_true", + dest='undefined_sanitizer', + default=False) + gr.add_option('--memory-sanitizer', + help=("Enable memory behaviour sanitizer compile and linker flags"), + action="store_true", + dest='memory_sanitizer', + default=False) + gr.add_option('--enable-libfuzzer', + help=("Build fuzzing binaries (use ADDITIONAL_CFLAGS to specify 
compiler options for libFuzzer or use CC=honggfuzz/hfuzz-cc)"), + action="store_true", dest='enable_libfuzzer', default=False) + gr.add_option('--enable-afl-fuzzer', + help=("Build fuzzing binaries AFL-style (typically use with CC=afl-gcc)"), + action="store_true", dest='enable_afl_fuzzer', default=False) + + # Fuzz targets may need additional LDFLAGS that we can't use on + # internal binaries like asn1_compile + + gr.add_option('--fuzz-target-ldflags', + help=("Linker flags to be used when building fuzz targets"), + action="store", dest='FUZZ_TARGET_LDFLAGS', default='') + + gr.add_option('--abi-check', + help=("Check ABI signatures for libraries"), + action='store_true', dest='ABI_CHECK', default=False) + gr.add_option('--abi-check-disable', + help=("Disable ABI checking (used with --enable-developer)"), + action='store_true', dest='ABI_CHECK_DISABLE', default=False) + gr.add_option('--abi-update', + help=("Update ABI signature files for libraries"), + action='store_true', dest='ABI_UPDATE', default=False) + + gr.add_option('--show-deps', + help=("Show dependency tree for the given target"), + dest='SHOWDEPS', default='') + + gr.add_option('--symbol-check', + help=("check symbols in object files against project rules"), + action='store_true', dest='SYMBOLCHECK', default=False) + + gr.add_option('--dup-symbol-check', + help=("check for duplicate symbols in object files and system libs (must be configured with --enable-developer)"), + action='store_true', dest='DUP_SYMBOLCHECK', default=False) + + gr.add_option('--why-needed', + help=("TARGET:DEPENDENCY check why TARGET needs DEPENDENCY"), + action='store', type='str', dest='WHYNEEDED', default=None) + + gr.add_option('--show-duplicates', + help=("Show objects which are included in multiple binaries or libraries"), + action='store_true', dest='SHOW_DUPLICATES', default=False) + + gr = opt.add_option_group('cross compilation options') + + gr.add_option('--cross-compile', + help=("configure for cross-compilation"), + action='store_true', dest='CROSS_COMPILE', default=False) + gr.add_option('--cross-execute', + help=("command prefix to use for cross-execution in configure"), + action='store', dest='CROSS_EXECUTE', default='') + gr.add_option('--cross-answers', + help=("answers to cross-compilation configuration (auto modified)"), + action='store', dest='CROSS_ANSWERS', default='') + gr.add_option('--hostcc', + help=("set host compiler when cross compiling"), + action='store', dest='HOSTCC', default=False) + + # we use SUPPRESS_HELP for these, as they are ignored, and are there only + # to allow existing RPM spec files to work + opt.add_option('--build', + help=SUPPRESS_HELP, + action='store', dest='AUTOCONF_BUILD', default='') + opt.add_option('--host', + help=SUPPRESS_HELP, + action='store', dest='AUTOCONF_HOST', default='') + opt.add_option('--target', + help=SUPPRESS_HELP, + action='store', dest='AUTOCONF_TARGET', default='') + opt.add_option('--program-prefix', + help=SUPPRESS_HELP, + action='store', dest='AUTOCONF_PROGRAM_PREFIX', default='') + opt.add_option('--disable-dependency-tracking', + help=SUPPRESS_HELP, + action='store_true', dest='AUTOCONF_DISABLE_DEPENDENCY_TRACKING', default=False) + opt.add_option('--disable-silent-rules', + help=SUPPRESS_HELP, + action='store_true', dest='AUTOCONF_DISABLE_SILENT_RULES', default=False) + + gr = opt.option_group('dist options') + gr.add_option('--sign-release', + help='sign the release tarball created by waf dist', + action='store_true', dest='SIGN_RELEASE') + gr.add_option('--tag', + 
help='tag release in git at the same time', + type='string', action='store', dest='TAG_RELEASE') + + opt.add_option('--disable-python', + help='do not generate python modules', + action='store_true', dest='disable_python', default=False) + + +@Utils.run_once +def configure(conf): + conf.env.hlist = [] + conf.env.srcdir = conf.srcnode.abspath() + + conf.define('SRCDIR', conf.env['srcdir']) + + conf.SETUP_CONFIGURE_CACHE(Options.options.enable_configure_cache) + + # load our local waf extensions + conf.load('gnu_dirs') + conf.load('wafsamba') + + conf.CHECK_CC_ENV() + + conf.load('compiler_c') + + conf.CHECK_STANDARD_LIBPATH() + + # we need git for 'waf dist' + conf.find_program('git', var='GIT') + + # older gcc versions (< 4.4) does not work with gccdeps, so we have to see if the .d file is generated + if Options.options.enable_gccdeps: + # stale file removal - the configuration may pick up the old .pyc file + p = os.path.join(conf.env.srcdir, 'buildtools/wafsamba/gccdeps.pyc') + if os.path.exists(p): + os.remove(p) + conf.load('gccdeps') + + # make the install paths available in environment + conf.env.LIBDIR = Options.options.LIBDIR or '${PREFIX}/lib' + conf.env.BINDIR = Options.options.BINDIR or '${PREFIX}/bin' + conf.env.SBINDIR = Options.options.SBINDIR or '${PREFIX}/sbin' + conf.env.MODULESDIR = Options.options.MODULESDIR + conf.env.PRIVATELIBDIR = Options.options.PRIVATELIBDIR + conf.env.BUNDLED_LIBS = Options.options.BUNDLED_LIBS.split(',') + conf.env.SYSTEM_LIBS = () + conf.env.PRIVATE_LIBS = Options.options.PRIVATE_LIBS.split(',') + conf.env.BUILTIN_LIBRARIES = Options.options.BUILTIN_LIBRARIES.split(',') + conf.env.NONSHARED_BINARIES = Options.options.NONSHARED_BINARIES.split(',') + + conf.env.PRIVATE_EXTENSION = Options.options.PRIVATE_EXTENSION + conf.env.PRIVATE_EXTENSION_EXCEPTION = Options.options.PRIVATE_EXTENSION_EXCEPTION.split(',') + conf.env.PRIVATE_VERSION = "%s_%s_%s" % (Context.g_module.APPNAME, + Context.g_module.VERSION, conf.env.PRIVATE_EXTENSION) + + conf.env.CROSS_COMPILE = Options.options.CROSS_COMPILE + conf.env.CROSS_EXECUTE = Options.options.CROSS_EXECUTE + conf.env.CROSS_ANSWERS = Options.options.CROSS_ANSWERS + conf.env.HOSTCC = Options.options.HOSTCC + + conf.env.AUTOCONF_BUILD = Options.options.AUTOCONF_BUILD + conf.env.AUTOCONF_HOST = Options.options.AUTOCONF_HOST + conf.env.AUTOCONF_PROGRAM_PREFIX = Options.options.AUTOCONF_PROGRAM_PREFIX + + conf.env.disable_python = Options.options.disable_python + + if (conf.env.AUTOCONF_HOST and + conf.env.AUTOCONF_BUILD and + conf.env.AUTOCONF_BUILD != conf.env.AUTOCONF_HOST): + Logs.error('ERROR: Mismatch between --build and --host. 
Please use --cross-compile instead') + sys.exit(1) + if conf.env.AUTOCONF_PROGRAM_PREFIX: + Logs.error('ERROR: --program-prefix not supported') + sys.exit(1) + + # enable ABI checking for developers + conf.env.ABI_CHECK = Options.options.ABI_CHECK or Options.options.developer + if Options.options.ABI_CHECK_DISABLE: + conf.env.ABI_CHECK = False + try: + conf.find_program('gdb', mandatory=True) + except: + conf.env.ABI_CHECK = False + + conf.env.enable_coverage = Options.options.enable_coverage + if conf.env.enable_coverage: + conf.ADD_LDFLAGS('-lgcov', testflags=True) + conf.ADD_CFLAGS('--coverage', testflags=True) + # disable abi check for coverage, otherwise ld will fail + conf.env.ABI_CHECK = False + + conf.env.GIT_LOCAL_CHANGES = Options.options.GIT_LOCAL_CHANGES + + conf.CHECK_UNAME() + + # see if we can compile and run a simple C program + conf.CHECK_CODE('printf("hello world")', + define='HAVE_SIMPLE_C_PROG', + mandatory=True, + execute=not conf.env.CROSS_COMPILE, + headers='stdio.h', + msg='Checking simple C program') + + # Try to find the right extra flags for -Werror behaviour + for f in ["-Werror", # GCC + "-errwarn=%all", # Sun Studio + "-qhalt=w", # IBM xlc + "-w2", # Tru64 + ]: + if conf.CHECK_CFLAGS([f]): + if not 'WERROR_CFLAGS' in conf.env: + conf.env['WERROR_CFLAGS'] = [] + conf.env['WERROR_CFLAGS'].extend([f]) + break + + # check which compiler/linker flags are needed for rpath support + if conf.CHECK_LDFLAGS(['-Wl,-rpath,.']): + conf.env['RPATH_ST'] = '-Wl,-rpath,%s' + elif conf.CHECK_LDFLAGS(['-Wl,-R,.']): + conf.env['RPATH_ST'] = '-Wl,-R,%s' + + # check for rpath + if conf.CHECK_LIBRARY_SUPPORT(rpath=True): + support_rpath = True + conf.env.RPATH_ON_BUILD = not Options.options.disable_rpath_build + conf.env.RPATH_ON_INSTALL = (conf.env.RPATH_ON_BUILD and + not Options.options.disable_rpath_install) + if not conf.env.PRIVATELIBDIR: + conf.env.PRIVATELIBDIR = '%s/%s' % (conf.env.LIBDIR, Context.g_module.APPNAME) + conf.env.RPATH_ON_INSTALL_PRIVATE = ( + not Options.options.disable_rpath_private_install) + else: + support_rpath = False + conf.env.RPATH_ON_INSTALL = False + conf.env.RPATH_ON_BUILD = False + conf.env.RPATH_ON_INSTALL_PRIVATE = False + if not conf.env.PRIVATELIBDIR: + # rpath is not possible so there is no sense in having a + # private library directory by default. + # the user can of course always override it. 
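    # Condensed, hedged restatement of the rpath policy chosen in the
    # branch above, as a never-called local helper (the option names match
    # the flags parsed in options(); the function itself is illustrative
    # only).  The LIBDIR fallback for PRIVATELIBDIR continues right below.
    def _rpath_policy(support_rpath, opts):
        on_build = support_rpath and not opts.disable_rpath_build
        return {
            'RPATH_ON_BUILD': on_build,
            'RPATH_ON_INSTALL': on_build and not opts.disable_rpath_install,
            'RPATH_ON_INSTALL_PRIVATE':
                support_rpath and not opts.disable_rpath_private_install,
        }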
+ conf.env.PRIVATELIBDIR = conf.env.LIBDIR + + if (not Options.options.disable_symbol_versions and + conf.CHECK_LIBRARY_SUPPORT(rpath=support_rpath, + version_script=True, + msg='-Wl,--version-script support')): + conf.env.HAVE_LD_VERSION_SCRIPT = True + else: + conf.env.HAVE_LD_VERSION_SCRIPT = False + + if conf.CHECK_CFLAGS(['-fvisibility=hidden']): + conf.env.VISIBILITY_CFLAGS = '-fvisibility=hidden' + conf.CHECK_CODE('''int main(void) { return 0; } + __attribute__((visibility("default"))) void vis_foo2(void) {}\n''', + cflags=conf.env.VISIBILITY_CFLAGS, + strict=True, + define='HAVE_VISIBILITY_ATTR', addmain=False) + + # check HAVE_CONSTRUCTOR_ATTRIBUTE + conf.CHECK_CODE(''' + void test_constructor_attribute(void) __attribute__ ((constructor)); + + void test_constructor_attribute(void) + { + return; + } + + int main(void) { + return 0; + } + ''', + 'HAVE_CONSTRUCTOR_ATTRIBUTE', + addmain=False, + strict=True, + msg='Checking for library constructor support') + + # check HAVE_PRAGMA_INIT alternatively + if not conf.env.HAVE_CONSTRUCTOR_ATTRIBUTE: + conf.CHECK_CODE(''' + #pragma init (test_init) + + void test_init(void) + { + return; + } + + int main(void) { + return 0; + } + ''', + 'HAVE_PRAGMA_INIT', + addmain=False, + strict=True, + msg='Checking for pragma init support') + + # check HAVE_DESTRUCTOR_ATTRIBUTE + conf.CHECK_CODE(''' + void test_destructor_attribute(void) __attribute__ ((destructor)); + + void test_destructor_attribute(void) + { + return; + } + + int main(void) { + return 0; + } + ''', + 'HAVE_DESTRUCTOR_ATTRIBUTE', + addmain=False, + strict=True, + msg='Checking for library destructor support') + + # check HAVE_PRAGMA_FINI alternatively + if not conf.env.HAVE_DESTRUCTOR_ATTRIBUTE: + conf.CHECK_CODE(''' + #pragma fini (test_fini) + + void test_fini(void) + { + return; + } + + int main(void) { + return 0; + } + ''', + 'HAVE_PRAGMA_FINI', + addmain=False, + strict=True, + msg='Checking for pragma fini support') + + conf.CHECK_CODE(''' + void test_attribute(void) __attribute__ (()); + + void test_attribute(void) + { + return; + } + + int main(void) { + return 0; + } + ''', + 'HAVE___ATTRIBUTE__', + addmain=False, + strict=True, + msg='Checking for __attribute__') + + # Solaris by default uses draft versions of some functions unless you set + # _POSIX_PTHREAD_SEMANTICS + if sys.platform.startswith('sunos'): + conf.DEFINE('_POSIX_PTHREAD_SEMANTICS', 1) + + if sys.platform.startswith('aix'): + conf.DEFINE('_ALL_SOURCE', 1, add_to_cflags=True) + # Might not be needed if ALL_SOURCE is defined + # conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True) + + # we should use the PIC options in waf instead + # Some compiler didn't support -fPIC but just print a warning + if conf.env['COMPILER_CC'] == "suncc": + conf.ADD_CFLAGS('-KPIC', testflags=True) + # we really want define here as we need to have this + # define even during the tests otherwise detection of + # boolean is broken + conf.DEFINE('_STDC_C99', 1, add_to_cflags=True) + conf.DEFINE('_XPG6', 1, add_to_cflags=True) + else: + conf.ADD_CFLAGS('-fPIC', testflags=True) + + # On Solaris 8 with suncc (at least) the flags for the linker to define the name of the + # library are not always working (if the command line is very very long and with a lot + # of files) + + if conf.env['COMPILER_CC'] == "suncc": + save = conf.env['SONAME_ST'] + conf.env['SONAME_ST'] = '-Wl,-h,%s' + if not conf.CHECK_SHLIB_INTRASINC_NAME_FLAGS("Checking if flags %s are ok" % conf.env['SONAME_ST']): + conf.env['SONAME_ST'] = save + + conf.CHECK_INLINE() + + 
+    # check for pkgconfig
+    conf.CHECK_CFG(atleast_pkgconfig_version='0.0.0')
+
+    conf.DEFINE('_GNU_SOURCE', 1, add_to_cflags=True)
+    conf.DEFINE('_XOPEN_SOURCE_EXTENDED', 1, add_to_cflags=True)
+
+    #
+    # Needs to be defined before std*.h and string*.h are included
+    # As Python.h already brings string.h we need it in CFLAGS.
+    # See memset_s() details here:
+    # https://en.cppreference.com/w/c/string/byte/memset
+    #
+    if conf.CHECK_CFLAGS(['-D__STDC_WANT_LIB_EXT1__=1']):
+        conf.ADD_CFLAGS('-D__STDC_WANT_LIB_EXT1__=1')
+
+    # on Tru64 certain features are only available with _OSF_SOURCE set to 1
+    # and _XOPEN_SOURCE set to 600
+    if conf.env['SYSTEM_UNAME_SYSNAME'] == 'OSF1':
+        conf.DEFINE('_OSF_SOURCE', 1, add_to_cflags=True)
+        conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True)
+
+    # SCM_RIGHTS is only available if _XOPEN_SOURCE is defined on IRIX
+    if conf.env['SYSTEM_UNAME_SYSNAME'] == 'IRIX':
+        conf.DEFINE('_XOPEN_SOURCE', 600, add_to_cflags=True)
+        conf.DEFINE('_BSD_TYPES', 1, add_to_cflags=True)
+
+    # Try to find the right extra flags for C99 initialisers
+    for f in ["", "-AC99", "-qlanglvl=extc99", "-qlanglvl=stdc99", "-c99"]:
+        if conf.CHECK_CFLAGS([f], '''
+struct foo {int x;char y;};
+struct foo bar = { .y = 'X', .x = 1 };
+'''):
+            if f != "":
+                conf.ADD_CFLAGS(f)
+            break
+
+    # get the base headers we'll use for the rest of the tests
+    conf.CHECK_HEADERS('stdio.h sys/types.h sys/stat.h stdlib.h stddef.h memory.h string.h',
+                       add_headers=True)
+    conf.CHECK_HEADERS('strings.h inttypes.h stdint.h unistd.h minix/config.h', add_headers=True)
+    conf.CHECK_HEADERS('ctype.h', add_headers=True)
+
+    if sys.platform == 'darwin':
+        conf.DEFINE('_DARWIN_C_SOURCE', 1, add_to_cflags=True)
+        conf.DEFINE('_DARWIN_UNLIMITED_GETGROUPS', 1, add_to_cflags=True)
+    else:
+        conf.CHECK_HEADERS('standards.h', add_headers=True)
+
+    conf.CHECK_HEADERS('stdbool.h stdint.h stdarg.h vararg.h', add_headers=True)
+    conf.CHECK_HEADERS('limits.h assert.h')
+
+    # see if we need special largefile flags
+    if not conf.CHECK_LARGEFILE():
+        raise Errors.WafError('Samba requires large file support, but it is not available on this platform: sizeof(off_t) < 8')
+
+    if conf.env.HAVE_STDDEF_H and conf.env.HAVE_STDLIB_H:
+        conf.DEFINE('STDC_HEADERS', 1)
+
+    conf.CHECK_HEADERS('sys/time.h time.h', together=True)
+
+    if conf.env.HAVE_SYS_TIME_H and conf.env.HAVE_TIME_H:
+        conf.DEFINE('TIME_WITH_SYS_TIME', 1)
+
+    # cope with different extensions for libraries
+    (root, ext) = os.path.splitext(conf.env.cshlib_PATTERN)
+    if ext[0] == '.':
+        conf.define('SHLIBEXT', ext[1:], quote=True)
+    else:
+        conf.define('SHLIBEXT', "so", quote=True)
+
+    # First try a header check for cross-compile friendliness
+    conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER
+                        #define B __BYTE_ORDER
+                        #elif defined(BYTE_ORDER)
+                        #define B BYTE_ORDER
+                        #endif
+
+                        #ifdef __LITTLE_ENDIAN
+                        #define LITTLE __LITTLE_ENDIAN
+                        #elif defined(LITTLE_ENDIAN)
+                        #define LITTLE LITTLE_ENDIAN
+                        #endif
+
+                        #if !defined(LITTLE) || !defined(B) || LITTLE != B
+                        #error Not little endian.
+                        #endif
+                        int main(void) { return 0; }\n""",
+                    addmain=False,
+                    headers="endian.h sys/endian.h",
+                    define="HAVE_LITTLE_ENDIAN")
+    conf.CHECK_CODE(code = """#ifdef __BYTE_ORDER
+                        #define B __BYTE_ORDER
+                        #elif defined(BYTE_ORDER)
+                        #define B BYTE_ORDER
+                        #endif
+
+                        #ifdef __BIG_ENDIAN
+                        #define BIG __BIG_ENDIAN
+                        #elif defined(BIG_ENDIAN)
+                        #define BIG BIG_ENDIAN
+                        #endif
+
+                        #if !defined(BIG) || !defined(B) || BIG != B
+                        #error Not big endian.
+ #endif + int main(void) { return 0; }\n""", + addmain=False, + headers="endian.h sys/endian.h", + define="HAVE_BIG_ENDIAN") + + if not conf.CONFIG_SET("HAVE_BIG_ENDIAN") and not conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): + # That didn't work! Do runtime test. + conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; + u.i = 0x01020304; + return u.c[0] == 0x04 && u.c[1] == 0x03 && u.c[2] == 0x02 && u.c[3] == 0x01 ? 0 : 1;""", + addmain=True, execute=True, + define='HAVE_LITTLE_ENDIAN', + msg="Checking for HAVE_LITTLE_ENDIAN - runtime") + conf.CHECK_CODE("""union { int i; char c[sizeof(int)]; } u; + u.i = 0x01020304; + return u.c[0] == 0x01 && u.c[1] == 0x02 && u.c[2] == 0x03 && u.c[3] == 0x04 ? 0 : 1;""", + addmain=True, execute=True, + define='HAVE_BIG_ENDIAN', + msg="Checking for HAVE_BIG_ENDIAN - runtime") + + # Extra sanity check. + if conf.CONFIG_SET("HAVE_BIG_ENDIAN") == conf.CONFIG_SET("HAVE_LITTLE_ENDIAN"): + Logs.error("Failed endian determination. The PDP-11 is back?") + sys.exit(1) + else: + if conf.CONFIG_SET("HAVE_BIG_ENDIAN"): + conf.DEFINE('WORDS_BIGENDIAN', 1) + + # check if signal() takes a void function + if conf.CHECK_CODE('return *(signal (0, 0)) (0) == 1', + define='RETSIGTYPE_INT', + execute=False, + headers='signal.h', + msg='Checking if signal handlers return int'): + conf.DEFINE('RETSIGTYPE', 'int') + else: + conf.DEFINE('RETSIGTYPE', 'void') + + conf.CHECK_VARIABLE('__FUNCTION__', define='HAVE_FUNCTION_MACRO') + + conf.CHECK_CODE('va_list ap1,ap2; va_copy(ap1,ap2)', + define="HAVE_VA_COPY", + msg="Checking for va_copy") + + conf.env.enable_fuzzing = False + + conf.env.enable_libfuzzer = Options.options.enable_libfuzzer + conf.env.enable_afl_fuzzer = Options.options.enable_afl_fuzzer + if conf.env.enable_libfuzzer or conf.env.enable_afl_fuzzer: + conf.env.enable_fuzzing = True + conf.DEFINE('FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION', 1) + conf.env.FUZZ_TARGET_LDFLAGS = Options.options.FUZZ_TARGET_LDFLAGS + + conf.SAMBA_BUILD_ENV() + + +def build(bld): + # give a more useful message if the source directory has moved + curdir = bld.path.abspath() + srcdir = bld.srcnode.abspath() + relpath = os.path.relpath(curdir, srcdir) + if relpath.find('../') != -1: + Logs.error('bld.path %s is not a child of %s' % (curdir, srcdir)) + raise Errors.WafError('''The top source directory has moved. Please run distclean and reconfigure''') + + bld.SETUP_BUILD_GROUPS() + bld.ENFORCE_GROUP_ORDERING() + bld.CHECK_PROJECT_RULES() diff --git a/compat/talloc_compat1.c b/compat/talloc_compat1.c new file mode 100644 index 0000000..519e8c3 --- /dev/null +++ b/compat/talloc_compat1.c @@ -0,0 +1,51 @@ +/* + Samba trivial allocation library - compat functions + + Copyright (C) Stefan Metzmacher 2009 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/
+
+/*
+ * This file contains only the functions needed to build a
+ * compat talloc.so.1 library on top of talloc.so.2
+ */
+
+#include "replace.h"
+#include "talloc.h"
+
+void *_talloc_reference(const void *context, const void *ptr);
+void *_talloc_reference(const void *context, const void *ptr) {
+	return _talloc_reference_loc(context, ptr,
+				     "Called from talloc compat1 "
+				     "_talloc_reference");
+}
+
+void *_talloc_steal(const void *new_ctx, const void *ptr);
+void *_talloc_steal(const void *new_ctx, const void *ptr)
+{
+	return talloc_reparent(talloc_parent(ptr), new_ctx, ptr);
+}
+
+#undef talloc_free
+int talloc_free(void *ptr);
+int talloc_free(void *ptr)
+{
+	return talloc_unlink(talloc_parent(ptr), ptr);
+}
+
diff --git a/compat/talloc_compat1.mk b/compat/talloc_compat1.mk
new file mode 100644
index 0000000..d1817f0
--- /dev/null
+++ b/compat/talloc_compat1.mk
@@ -0,0 +1,21 @@
+talloccompatdir := $(tallocdir)/compat
+
+TALLOC_COMPAT1_VERSION_MAJOR = 1
+TALLOC_COMPAT1_OBJ = $(talloccompatdir)/talloc_compat1.o
+
+TALLOC_COMPAT1_SOLIB = libtalloc-compat1-$(TALLOC_VERSION).$(SHLIBEXT)
+TALLOC_COMPAT1_SONAME = libtalloc.$(SHLIBEXT).$(TALLOC_COMPAT1_VERSION_MAJOR)
+
+$(TALLOC_COMPAT1_SOLIB): $(TALLOC_COMPAT1_OBJ) $(TALLOC_SOLIB)
+	$(SHLD) $(SHLD_FLAGS) -o $@ $(TALLOC_COMPAT1_OBJ) \
+		$(TALLOC_SOLIB) $(SONAMEFLAG)$(TALLOC_COMPAT1_SONAME)
+
+all:: $(TALLOC_COMPAT1_SOLIB)
+
+install::
+	${INSTALLCMD} -d $(DESTDIR)$(libdir)
+	${INSTALLCMD} -m 755 $(TALLOC_COMPAT1_SOLIB) $(DESTDIR)$(libdir)
+
+clean::
+	rm -f $(TALLOC_COMPAT1_OBJ) $(TALLOC_COMPAT1_SOLIB)
+
diff --git a/configure b/configure
new file mode 100755
index 0000000..af76185
--- /dev/null
+++ b/configure
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+PREVPATH=$(dirname $0)
+
+if [ -f $PREVPATH/../../buildtools/bin/waf ]; then
+	WAF=../../buildtools/bin/waf
+elif [ -f $PREVPATH/buildtools/bin/waf ]; then
+	WAF=./buildtools/bin/waf
+else
+	echo "talloc: Unable to find waf"
+	exit 1
+fi
+
+# using JOBS=1 gives maximum compatibility with
+# systems like AIX which have broken threading in python
+JOBS=1
+export JOBS
+
+# Make sure we don't have any library preloaded.
+unset LD_PRELOAD
+
+# Make sure we get stable hashes
+PYTHONHASHSEED=1
+export PYTHONHASHSEED
+
+cd . || exit 1
+$PYTHON $WAF configure "$@" || exit 1
+cd $PREVPATH
diff --git a/doc/context.png b/doc/context.png
Binary files differ
new file mode 100644
index 0000000..48a6ca0
--- /dev/null
+++ b/doc/context.png
diff --git a/doc/context_tree.png b/doc/context_tree.png
Binary files differ
new file mode 100644
index 0000000..9723459
--- /dev/null
+++ b/doc/context_tree.png
diff --git a/doc/mainpage.dox b/doc/mainpage.dox
new file mode 100644
index 0000000..ece6ccb
--- /dev/null
+++ b/doc/mainpage.dox
@@ -0,0 +1,111 @@
+/**
+ * @mainpage
+ *
+ * talloc is a hierarchical, reference counted memory pool system with
+ * destructors. It is the core memory allocator used in Samba.
+ *
+ * @section talloc_download Download
+ *
+ * You can download the latest releases of talloc from the
+ * <a href="http://samba.org/ftp/talloc" target="_blank">talloc directory</a>
+ * on the Samba public source archive.
+ *
+ * @section main-tutorial Tutorial
+ *
+ * You should start by reading @subpage libtalloc_tutorial, then reading the documentation of
+ * the interesting functions as you go.
+ *
+ * @section talloc_bugs Discussion and bug reports
+ *
+ * talloc does not currently have its own mailing list or bug tracking system.
+ * For now, please use the
+ * <a href="https://lists.samba.org/mailman/listinfo/samba-technical" target="_blank">samba-technical</a>
+ * mailing list, and the
+ * <a href="http://bugzilla.samba.org/" target="_blank">Samba bugzilla</a>
+ * bug tracking system.
+ *
+ * @section talloc_devel Development
+ * You can download the latest code either via git or rsync.
+ *
+ * To fetch via git see the following guide:
+ *
+ * <a href="http://wiki.samba.org/index.php/Using_Git_for_Samba_Development" target="_blank">Using Git for Samba Development</a>
+ *
+ * Once you have cloned the tree, switch to the master branch and cd into the
+ * lib/talloc directory.
+ *
+ * To fetch via rsync use this command:
+ *
+ *   rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/talloc .
+ *
+ * @section talloc_preample Preamble
+ *
+ * talloc is a hierarchical, reference counted memory pool system with
+ * destructors.
+ *
+ * Perhaps the biggest difference from other memory pool systems is that there
+ * is no distinction between a "talloc context" and a "talloc pointer". Any
+ * pointer returned from talloc() is itself a valid talloc context. This means
+ * you can do this:
+ *
+ * @code
+ * struct foo *X = talloc(mem_ctx, struct foo);
+ * X->name = talloc_strdup(X, "foo");
+ * @endcode
+ *
+ * The pointer X->name would be a "child" of the talloc context "X" which is
+ * itself a child of mem_ctx. So if you do talloc_free(mem_ctx) then it is all
+ * destroyed, whereas if you do talloc_free(X) then just X and X->name are
+ * destroyed, and if you do talloc_free(X->name) then just the name element of
+ * X is destroyed.
+ *
+ * If you think about this, then what this effectively gives you is an n-ary
+ * tree, where you can free any part of the tree with talloc_free().
+ *
+ * If you find this confusing, then run the testsuite to watch talloc in
+ * action. You may also like to add your own tests to testsuite.c to clarify
+ * how some particular situation is handled.
+ *
+ * @section talloc_performance Performance
+ *
+ * All the additional features of talloc() over malloc() do come at a price. We
+ * have a simple performance test in Samba4 that measures talloc() versus
+ * malloc() performance, and it seems that talloc() is about 4% slower than
+ * malloc() on my x86 Debian Linux box. For Samba, the great reduction in code
+ * complexity that we get by using talloc makes this worthwhile, especially as
+ * the total overhead of talloc/malloc in Samba is already quite small.
+ *
+ * @section talloc_named Named blocks
+ *
+ * Every talloc chunk has a name that can be used as a dynamic type-checking
+ * system. If, for some reason (a callback function, for example), you had to
+ * cast a "struct foo *" to a "void *" variable, you can later safely reassign
+ * the "void *" pointer to a "struct foo *" by using the talloc_get_type() or
+ * talloc_get_type_abort() macros.
+ *
+ * @code
+ * struct foo *X = talloc_get_type_abort(ptr, struct foo);
+ * @endcode
+ *
+ * This will abort if "ptr" does not contain a pointer that has been created
+ * with talloc(mem_ctx, struct foo).
+ *
+ * @section talloc_threading Multi-threading
+ *
+ * talloc itself does not deal with threads. It is thread-safe (assuming the
+ * underlying "malloc" is), as long as each thread uses different memory
+ * contexts.
+ *
+ * If two threads use the same context, they need to synchronize in order
+ * to be safe. In particular:
+ *
+ * - when using talloc_enable_leak_report(), giving directly NULL as a parent
+ *   context implicitly refers to a hidden "null context" global variable, so
+ *   this should not be used in a multi-threaded environment without proper
+ *   synchronization. In threaded code, turn off null tracking using
+ *   talloc_disable_null_tracking().
+ * - the context returned by talloc_autofree_context() is also global so
+ *   shouldn't be used by several threads simultaneously without
+ *   synchronization.
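+ *
+ * For example, a minimal sketch (assuming POSIX threads; the names thread_fn
+ * and id are illustrative) of the safe pattern - null tracking disabled and
+ * one top level context per thread:
+ *
+ * @code
+ * #include <stdio.h>
+ * #include <pthread.h>
+ * #include <talloc.h>
+ *
+ * static void *thread_fn(void *arg)
+ * {
+ *     /* a top level context private to this thread */
+ *     TALLOC_CTX *top = talloc_new(NULL);
+ *     if (top == NULL) {
+ *         return NULL;
+ *     }
+ *     char *str = talloc_asprintf(top, "thread %d", *(int *)arg);
+ *     printf("%s\n", str);
+ *     talloc_free(top);
+ *     return NULL;
+ * }
+ *
+ * int main(void)
+ * {
+ *     pthread_t t;
+ *     int id = 0;
+ *
+ *     /* must be called before any threads are created */
+ *     talloc_disable_null_tracking();
+ *
+ *     pthread_create(&t, NULL, thread_fn, &id);
+ *     pthread_join(t, NULL);
+ *     return 0;
+ * }
+ * @endcode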
+ */
diff --git a/doc/stealing.png b/doc/stealing.png
Binary files differ
new file mode 100644
index 0000000..8833e06
--- /dev/null
+++ b/doc/stealing.png
diff --git a/doc/tutorial_bestpractices.dox b/doc/tutorial_bestpractices.dox
new file mode 100644
index 0000000..3634446
--- /dev/null
+++ b/doc/tutorial_bestpractices.dox
@@ -0,0 +1,192 @@
+/**
+@page libtalloc_bestpractices Chapter 7: Best practices
+
+The following sections contain several best practices and good habits that the
+<a href="http://www.samba.org">Samba</a> and
+<a href="https://fedorahosted.org/sssd">SSSD</a> developers have discovered
+over the years. These will help you to write code which is better, easier to
+debug, and has as few memory leaks as possible (hopefully none).
+
+@section bp-hierarchy Keep the context hierarchy steady
+
+Talloc is a hierarchical memory allocator, and this hierarchical nature is
+what makes the programming less error-prone. It makes the memory easier to
+manage and to free. Therefore, the first thing we should keep in mind is:
+always project your data structures into the talloc context hierarchy.
+
+That means if we have a structure, we should always use it as a parent context
+for its elements. This way we will not encounter any trouble when freeing the
+structure or when changing its parent. The same rule applies to arrays.
+
+For example, the structure <code>user</code> from section @ref context-hierarchy
+should be created with the context hierarchy illustrated in the next image.
+
+@image html context_tree.png
+
+@section bp-tmpctx Every function should use its own context
+
+It is a good practice to create a temporary talloc context at the beginning of
+a function and free the context just before the return statement. All the data
+must be allocated on this context or on its children. This ensures that no
+memory leaks are created as long as we do not forget to free the temporary
+context.
+
+This pattern applies to both situations - when a function does not return any
+dynamically allocated value and when it does. However, it needs a little
+extension for the latter case.
+
+@subsection bp-tmpctx-1 Functions that do not return any dynamically allocated value
+
+If the function does not return any value created on the heap, we will just obey
+the aforementioned pattern.
+
+@code
+int bar(void)
+{
+    int ret;
+    TALLOC_CTX *tmp_ctx = talloc_new(NULL);
+    if (tmp_ctx == NULL) {
+        ret = ENOMEM;
+        goto done;
+    }
+    /* allocate data on tmp_ctx or on its descendants */
+    ret = EOK;
+done:
+    talloc_free(tmp_ctx);
+    return ret;
+}
+@endcode
+
+@subsection bp-tmpctx-2 Functions returning dynamically allocated values
+
+If our function returns any dynamically allocated data, its first parameter
+should always be the destination talloc context. This context serves as a parent
+for the output values. But again, we will create the output values as the
+descendants of the temporary context. If everything goes well, we will change
If everything goes well, we will change +the parent of the output values from the temporary to the destination talloc +context. + +This pattern ensures that if an error occurs (e.g. I/O error or insufficient +amount of the memory), all allocated data is freed and no garbage appears on +the destination context. + +@code +int struct_foo_init(TALLOC_CTX *mem_ctx, struct foo **_foo) +{ + int ret; + struct foo *foo = NULL; + TALLOC_CTX *tmp_ctx = talloc_new(NULL); + if (tmp_ctx == NULL) { + ret = ENOMEM; + goto done; + } + foo = talloc_zero(tmp_ctx, struct foo); + /* ... */ + *_foo = talloc_steal(mem_ctx, foo); + ret = EOK; +done: + talloc_free(tmp_ctx); + return ret; +} +@endcode + +@section bp-null Allocate temporary contexts on NULL + +As it can be seen on the previous listing, instead of allocating the temporary +context directly on <code>mem_ctx</code>, we created a new top level context +using <code>NULL</code> as the parameter for <code>talloc_new()</code> function. +Take a look at the following example: + +@code +char *create_user_filter(TALLOC_CTX *mem_ctx, + uid_t uid, const char *username) +{ + char *filter = NULL; + char *sanitized_username = NULL; + /* tmp_ctx is a child of mem_ctx */ + TALLOC_CTX *tmp_ctx = talloc_new(mem_ctx); + if (tmp_ctx == NULL) { + return NULL; + } + + sanitized_username = sanitize_string(tmp_ctx, username); + if (sanitized_username == NULL) { + talloc_free(tmp_ctx); + return NULL; + } + + filter = talloc_aprintf(tmp_ctx,"(|(uid=%llu)(uname=%s))", + uid, sanitized_username); + if (filter == NULL) { + return NULL; /* tmp_ctx is not freed */ (*@\label{lst:tmp-ctx-3:leak}@*) + } + + /* filter becomes a child of mem_ctx */ + filter = talloc_steal(mem_ctx, filter); + talloc_free(tmp_ctx); + return filter; +} +@endcode + +We forgot to free <code>tmp_ctx</code> before the <code>return</code> statement +in the <code>filter == NULL</code> condition. However, it is created as a child +of <code>mem_ctx</code> context and as such it will be freed as soon as the +<code>mem_ctx</code> is freed. Therefore, no detectable memory leak is created. + +On the other hand, we do not have any way to access the allocated data +and for all we know <code>mem_ctx</code> may exist for the lifetime of our +application. For these reasons this should be considered as a memory leak. How +can we detect if it is unreferenced but still attached to its parent context? +The only way is to notice the mistake in the source code. + +But if we create the temporary context as a top level context, it will not be +freed and memory diagnostic tools +(e.g. <a href="http://valgrind.org">valgrind</a>) are able to do their job. + +@section bp-pool Temporary contexts and the talloc pool + +If we want to take the advantage of the talloc pool but also keep to the +pattern introduced in the previous section, we are unable to do it directly. The +best thing to do is to create a conditional build where we can decide how do we +want to create the temporary context. For example, we can create the following +macros: + +@code +#ifdef USE_POOL_CONTEXT + #define CREATE_POOL_CTX(ctx, size) talloc_pool(ctx, size) + #define CREATE_TMP_CTX(ctx) talloc_new(ctx) +#else + #define CREATE_POOL_CTX(ctx, size) talloc_new(ctx) + #define CREATE_TMP_CTX(ctx) talloc_new(NULL) +#endif +@endcode + +Now if our application is under development, we will build it with macro +<code>USE_POOL_CONTEXT</code> undefined. This way, we can use memory diagnostic +utilities to detect memory leaks. 
+
+The release version will be compiled with the macro defined. This will enable
+pool contexts and therefore reduce the number of <code>malloc()</code> calls,
+which results in slightly faster processing.
+
+@code
+int struct_foo_init(TALLOC_CTX *mem_ctx, struct foo **_foo)
+{
+    int ret;
+    struct foo *foo = NULL;
+    TALLOC_CTX *tmp_ctx = CREATE_TMP_CTX(mem_ctx);
+    /* ... */
+}
+
+errno_t handle_request(TALLOC_CTX *mem_ctx)
+{
+    int ret;
+    struct foo *foo = NULL;
+    TALLOC_CTX *pool_ctx = CREATE_POOL_CTX(NULL, 1024);
+    ret = struct_foo_init(mem_ctx, &foo);
+    /* ... */
+}
+@endcode
+
+*/
diff --git a/doc/tutorial_context.dox b/doc/tutorial_context.dox
new file mode 100644
index 0000000..b8bfe26
--- /dev/null
+++ b/doc/tutorial_context.dox
@@ -0,0 +1,198 @@
+/**
+@page libtalloc_context Chapter 1: Talloc context
+@section context Talloc context
+
+The talloc context is the most important part of this library and is
+responsible for every single feature of this memory allocator. It is a logical
+unit which represents a memory space managed by talloc.
+
+From the programmer's point of view, the talloc context is completely
+equivalent to a pointer that would be returned by the memory routines from the
+C standard library. This means that every context that is returned from the
+talloc library can be used directly in functions that do not use talloc
+internally. For example, we can do the following:
+
+@code
+char *str1 = strdup("I am NOT a talloc context");
+char *str2 = talloc_strdup(NULL, "I AM a talloc context");
+
+printf("%d\n", strcmp(str1, str2) == 0);
+
+free(str1);
+talloc_free(str2); /* we cannot use free() on str2 */
+@endcode
+
+This is possible because the context is internally handled as a special
+fixed-length structure called a talloc chunk. Each chunk stores context
+metadata followed by the memory space requested by the programmer. When a
+talloc function returns a context (pointer), it will in fact return a pointer
+to the user space portion of the talloc chunk. If we manipulate this context
+using talloc functions, the talloc library transforms the user-space pointer
+back to the starting address of the chunk. This is also the reason why we were
+unable to use <code>free(str2)</code> in the previous example - because
+<code>str2</code> does not point at the beginning of the allocated block of
+memory. This is illustrated in the next image:
+
+@image html context.png
+
+The type TALLOC_CTX is defined in talloc.h to identify a talloc context in
+function parameters. However, this type is just an alias for <code>void</code>
+and exists only for semantic reasons - thus we can differentiate between
+<code>void *</code> (arbitrary data) and <code>TALLOC_CTX *</code> (talloc
+context).
+
+@subsection metadata Context meta data
+
+Every talloc context carries several pieces of internal information along with
+the allocated memory:
+
+  - name - which is used in reports of the context hierarchy and to simulate
+    a dynamic type system,
+  - size of the requested memory in bytes - this can be used to determine
+    the number of elements in arrays,
+  - attached destructor - which is executed just before the memory block is
+    about to be freed,
+  - references to the context,
+  - children and parent contexts - which create the hierarchical view on the
+    memory.
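+
+For illustration, a minimal sketch (using only public talloc routines) that
+queries some of this meta data at runtime:
+
+@code
+int *numbers = talloc_array(NULL, int, 10);
+
+printf("%s\n", talloc_get_name(numbers));      /* prints "int" */
+printf("%zu\n", talloc_get_size(numbers));     /* 10 * sizeof(int) bytes */
+printf("%zu\n", talloc_array_length(numbers)); /* prints "10" */
+
+talloc_free(numbers);
+@endcode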
+
+@section context-hierarchy Hierarchy of talloc context
+
+Every talloc context contains information about its parent and children. Talloc
+uses this information to create a hierarchical model of memory or, to be more
+precise, an n-ary tree where each node represents a single talloc context. The
+root node of the tree is referred to as a top level context - a context without
+any parent.
+
+This approach has several advantages:
+
+  - as a consequence of freeing a talloc context, all of its children
+    will be properly deallocated as well,
+  - the parent of a context can be changed at any time, which
+    results in moving the whole subtree under another node,
+  - it creates a more natural way of managing data structures.
+
+@subsection hierarchy-example Example
+
+We have a structure that stores basic information about a user - his/her name,
+identification number and the groups he/she is a member of:
+
+@code
+struct user {
+    uid_t uid;
+    char *username;
+    size_t num_groups;
+    char **groups;
+};
+@endcode
+
+We will allocate this structure using talloc. The result will be the following
+context tree:
+
+@image html context_tree.png
+
+@code
+/* create new top level context */
+struct user *user = talloc(NULL, struct user);
+
+user->uid = 1000;
+user->num_groups = N;
+
+/* make user the parent of following contexts */
+user->username = talloc_strdup(user, "Test user");
+user->groups = talloc_array(user, char*, user->num_groups);
+
+for (i = 0; i < user->num_groups; i++) {
+    /* make user->groups the parent of following context */
+    user->groups[i] = talloc_asprintf(user->groups,
+                                      "Test group %d", i);
+}
+@endcode
+
+This way, we have gained a lot of additional capabilities, one of which is
+very simple deallocation of the structure and all of its elements.
+
+With the C standard library we would first need to iterate over the array of
+groups and free every element separately. Then we must deallocate the array
+that stores them. Next we deallocate the username, and as the last step free
+the structure itself. But with talloc, the only operation we need to execute is
+freeing the structure context. Its descendants will be freed automatically.
+
+@code
+talloc_free(user);
+@endcode
+
+@section keep-hierarchy Always keep the hierarchy steady!
+
+Talloc is a hierarchical memory allocator, and this hierarchical nature is
+what makes the programming less error-prone. It makes the memory easier to
+manage and to free. Therefore, the first thing we should keep in mind is:
+<strong>always project our data structures into the talloc context
+hierarchy</strong>.
+
+That means if we have a structure, we should always use it as a parent context
+for its elements. This way we will not encounter any trouble when freeing this
+structure or when changing its parent. The same rule applies to arrays.
+
+@section creating-context Creating a talloc context
+
+Here are the most important functions that create a new talloc context.
+
+@subsection type-safe Type-safe functions
+
+The talloc() function allocates the size that is necessary for the given type
+and returns a new, properly cast pointer. This is the preferred way to create
+a new context, as we can rely on the compiler to detect type mismatches.
+
+The name of the context is automatically set to the name of the data type,
+which is used to simulate a dynamic type system.
+
+@code
+struct user *user = talloc(ctx, struct user);
+
+/* initialize to default values */
+user->uid = 0;
+user->username = NULL;
+user->num_groups = 0;
+user->groups = NULL;
+
+/* or we can achieve the same result with */
+struct user *user_zero = talloc_zero(ctx, struct user);
+@endcode
+
+@subsection zero-length Zero-length contexts
+
+The zero-length context is basically a context without any special semantic
+meaning. We can use it the same way as any other context. The only difference
+is that it consists only of the meta data about the context. Therefore, it is
+strictly of type <code>TALLOC_CTX*</code>. It is often used in cases where we
+want to aggregate several data structures under one parent (zero-length)
+context, such as a temporary context to contain memory needed within a single
+function that is not interesting to the caller. Allocating on a zero-length
+temporary context will make clean-up of the function simpler.
+
+@code
+TALLOC_CTX *tmp_ctx = NULL;
+struct foo *foo = NULL;
+struct bar *bar = NULL;
+
+/* new zero-length top level context */
+tmp_ctx = talloc_new(NULL);
+if (tmp_ctx == NULL) {
+    return ENOMEM;
+}
+
+foo = talloc(tmp_ctx, struct foo);
+bar = talloc(tmp_ctx, struct bar);
+
+/* free everything at once */
+talloc_free(tmp_ctx);
+@endcode
+
+@subsection context-see-also See also
+
+- talloc_size()
+- talloc_named()
+- @ref talloc_array
+- @ref talloc_string
+
+*/
diff --git a/doc/tutorial_debugging.dox b/doc/tutorial_debugging.dox
new file mode 100644
index 0000000..aadbb0d
--- /dev/null
+++ b/doc/tutorial_debugging.dox
@@ -0,0 +1,116 @@
+/**
+@page libtalloc_debugging Chapter 6: Debugging
+
+Although talloc makes memory management significantly easier than the C
+standard library, developers are still only human and can make mistakes.
+Therefore, it can be handy to know some tools for inspecting talloc memory
+usage.
+
+@section log-abort Talloc log and abort
+
+We have already encountered the abort function in section @ref dts.
+In that case it was used when a type mismatch was detected. However, talloc
+calls this abort function in several more situations:
+
+- when the provided pointer is not a valid talloc context,
+- when the meta data is invalid - probably due to memory corruption,
+- and when an access after free is detected.
+
+The third one is probably the most interesting. It can help us detect an
+attempt to double-free a context or any other manipulation of it via talloc
+functions (using it as a parent, stealing it, etc.).
+
+Before the context is freed, talloc sets a flag in the meta data. This is then
+used to detect the access after free. It basically works on the assumption that
+the memory stays unchanged (at least for a while) even when it is properly
+deallocated. This will work even if the memory is filled with the value
+specified in the <code>TALLOC_FREE_FILL</code> environment variable, because it
+fills only the data part and leaves the meta data intact.
+
+Apart from the abort function, talloc uses a log function to provide additional
+information about the aforementioned violations.
+To enable logging, we set the
+log function with one of:
+
+- talloc_set_log_fn()
+- talloc_set_log_stderr()
+
+The following code, together with its output, shows what happens when we
+access a context after it has been freed:
+
+@code
+talloc_set_log_stderr();
+TALLOC_CTX *ctx = talloc_new(NULL);
+
+talloc_free(ctx);
+talloc_free(ctx);
+
+results in:
+talloc: access after free error - first free may be at ../src/main.c:55
+Bad talloc magic value - access after free
+@endcode
+
+Another example is an invalid context:
+
+@code
+talloc_set_log_stderr();
+TALLOC_CTX *ctx = talloc_new(NULL);
+char *str = strdup("not a talloc context");
+talloc_steal(ctx, str);
+
+results in:
+Bad talloc magic value - unknown value
+@endcode
+
+@section reports Memory usage reports
+
+Talloc can print reports of the memory usage of a specified talloc context to
+a file, to <code>stdout</code>, or to <code>stderr</code>. The report can be
+simple or full. The simple report provides information only about the context
+itself and its direct descendants. The full report goes recursively through the
+entire context tree. See:
+
+- talloc_report()
+- talloc_report_full()
+
+We will use the following code to retrieve the sample report:
+
+@code
+struct foo {
+    char *str;
+};
+
+TALLOC_CTX *ctx = talloc_new(NULL);
+char *str = talloc_strdup(ctx, "my string");
+struct foo *foo = talloc_zero(ctx, struct foo);
+foo->str = talloc_strdup(foo, "I am Foo");
+char *str2 = talloc_strdup(foo, "Foo is my parent");
+
+/* print full report */
+talloc_report_full(ctx, stdout);
+@endcode
+
+It will print a full report of <code>ctx</code> to the standard output.
+The message should be similar to:
+
+@code
+full talloc report on 'talloc_new: ../src/main.c:82' (total 46 bytes in 5 blocks)
+    struct foo contains 34 bytes in 3 blocks (ref 0) 0x1495130
+        Foo is my parent contains 17 bytes in 1 blocks (ref 0) 0x1495200
+        I am Foo contains 9 bytes in 1 blocks (ref 0) 0x1495190
+    my string contains 10 bytes in 1 blocks (ref 0) 0x14950c0
+@endcode
+
+We can notice in this report that something is wrong with the context containing
+<code>struct foo</code>. We know that the structure has only one string element.
+However, we can see in the report that it has two children. This indicates that
+we have either violated the memory hierarchy or forgotten to free it as
+temporary data. Looking into the code, we can see that <code>"Foo is my parent"
+</code> should be attached to <code>ctx</code>.
+
+See also:
+
+- talloc_enable_null_tracking()
+- talloc_disable_null_tracking()
+- talloc_enable_leak_report()
+- talloc_enable_leak_report_full()
+
+*/
diff --git a/doc/tutorial_destructors.dox b/doc/tutorial_destructors.dox
new file mode 100644
index 0000000..ed06387
--- /dev/null
+++ b/doc/tutorial_destructors.dox
@@ -0,0 +1,82 @@
+/**
+@page libtalloc_destructors Chapter 4: Using destructors
+
+@section destructors Using destructors
+
+Destructors are well-known methods in the world of object-oriented programming.
+A destructor is a method of an object that is automatically run when the object
+is destroyed. It is usually used to return resources taken by the object back
+to the system (e.g. closing file descriptors, terminating a connection to a
+database, deallocating memory).
+
+With talloc we can take advantage of destructors even in C. We can easily
+attach our own destructor to a talloc context. When the context is freed, the
+destructor will run automatically.
+
+To attach or detach a destructor to a talloc context, use
+talloc_set_destructor().
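+
+For instance, a minimal sketch (the structure, the destructor and
+<code>mem_ctx</code> are illustrative, not part of the talloc API) that closes
+a file descriptor when its owning context is freed:
+
+@code
+struct file_ctx {
+    int fd;
+};
+
+int file_ctx_destructor(struct file_ctx *f)
+{
+    close(f->fd);
+    return 0; /* returning -1 would make talloc_free() fail */
+}
+
+struct file_ctx *f = talloc(mem_ctx, struct file_ctx);
+f->fd = open("/tmp/example", O_RDONLY);
+talloc_set_destructor(f, file_ctx_destructor);
+
+talloc_free(f); /* runs the destructor before deallocating f */
+@endcode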
+
+@section destructors-example Example
+
+Imagine that we have a dynamically created linked list. Before we deallocate an
+element of the list, we need to make sure that we have successfully removed it
+from the list. Normally, this would be done by two commands in this exact
+order: first remove it from the list, then free the element. With talloc, we
+can do this in one step by setting a destructor on the element that removes it
+from the list; talloc_free() will do the rest.
+
+The destructor would be:
+
+@code
+int list_remove(void *ctx)
+{
+    struct list_el *el = NULL;
+    el = talloc_get_type_abort(ctx, struct list_el);
+    /* remove element from the list */
+}
+@endcode
+
+GCC version 3 and newer can check the types during compilation, so if it is
+our primary compiler, we can use a more advanced destructor:
+
+@code
+int list_remove(struct list_el *el)
+{
+    /* remove element from the list */
+}
+@endcode
+
+Now we will assign the destructor to the list element. We can do this directly
+in the function that inserts it.
+
+@code
+struct list_el* list_insert(TALLOC_CTX *mem_ctx,
+                            struct list_el *where,
+                            void *ptr)
+{
+    struct list_el *el = talloc(mem_ctx, struct list_el);
+    el->data = ptr;
+    /* insert into list */
+
+    talloc_set_destructor(el, list_remove);
+    return el;
+}
+@endcode
+
+Because talloc is a hierarchical memory allocator, we can go a step further and
+free the data with the element as well:
+
+@code
+struct list_el* list_insert_free(TALLOC_CTX *mem_ctx,
+                                 struct list_el *where,
+                                 void *ptr)
+{
+    struct list_el *el = NULL;
+    el = list_insert(mem_ctx, where, ptr);
+
+    talloc_steal(el, ptr);
+
+    return el;
+}
+@endcode
+
+*/
diff --git a/doc/tutorial_dts.dox b/doc/tutorial_dts.dox
new file mode 100644
index 0000000..75b5172
--- /dev/null
+++ b/doc/tutorial_dts.dox
@@ -0,0 +1,109 @@
+/**
+@page libtalloc_dts Chapter 3: Dynamic type system
+
+@section dts Dynamic type system
+
+Generic programming in the C language is very difficult. There is neither
+inheritance nor the templates known from object-oriented languages, and there
+is no dynamic type system. Therefore, generic programming in this language is
+usually done by type-casting a variable to <code>void*</code> and transferring
+it through a generic function to a specialized callback, as illustrated in the
+next listing.
+
+@code
+void generic_function(callback_fn cb, void *pvt)
+{
+    /* do some stuff and call the callback */
+    cb(pvt);
+}
+
+void specific_callback(void *pvt)
+{
+    struct specific_struct *data;
+    data = (struct specific_struct*)pvt;
+    /* ... */
+}
+
+void specific_function(void)
+{
+    struct specific_struct data;
+    generic_function(specific_callback, &data);
+}
+@endcode
+
+Unfortunately, the type information is lost as a result of this type cast. The
+compiler cannot check the type during compilation, nor are we able to do it
+at runtime. Providing an invalid data type to the callback will result in
+unexpected behaviour (not necessarily a crash) of the application. This mistake
+is usually hard to detect because it is not the first thing that comes to mind.
+
+As we already know, every talloc context contains a name. This name is available
+at any time and it can be used to determine the type of a context even if we
+lose the type of the variable.
+
+Although the name of the context can be set to any arbitrary string, the best
+way of using it to simulate the dynamic type system is to set it directly to the
+type of the variable.
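+
+For example, a context allocated with talloc_size() carries no type name, but
+we can set one explicitly (a small sketch using talloc_set_name()):
+
+@code
+void *ptr = talloc_size(NULL, sizeof(struct foo));
+talloc_set_name(ptr, "struct foo");
+@endcode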
+
+It is recommended to use one of talloc() and talloc_array() (or their
+variants) to create the context, as they set its name to the name of the
+given type automatically.
+
+If we have a context with such a name, we can use two similar functions that
+do both the type check and the type cast for us:
+
+- talloc_get_type()
+- talloc_get_type_abort()
+
+@section dts-examples Examples
+
+The following example will show how generic programming with talloc is handled -
+if we provide invalid data to the callback, the program will be aborted. This
+is a sufficient reaction for such an error in most applications.
+
+@code
+void foo_callback(void *pvt)
+{
+    struct foo *data = talloc_get_type_abort(pvt, struct foo);
+    /* ... */
+}
+
+int do_foo(void)
+{
+    struct foo *data = talloc_zero(NULL, struct foo);
+    /* ... */
+    return generic_function(foo_callback, data);
+}
+@endcode
+
+But if we are creating a service application that should run for the whole
+uptime of a server, we may want to abort the application during the development
+process (to make sure the error is not overlooked) but try to recover from the
+error in the customer release. This can be achieved by creating a custom abort
+function with a conditional build.
+
+@code
+void my_abort(const char *reason)
+{
+    fprintf(stderr, "talloc abort: %s\n", reason);
+#ifdef ABORT_ON_TYPE_MISMATCH
+    abort();
+#endif
+}
+@endcode
+
+The usage of talloc_get_type_abort() would then be:
+
+@code
+talloc_set_abort_fn(my_abort);
+
+TALLOC_CTX *ctx = talloc_new(NULL);
+char *str = talloc_get_type_abort(ctx, char);
+if (str == NULL) {
+    /* recovery code */
+}
+/* talloc abort: ../src/main.c:25: Type mismatch:
+   name[talloc_new: ../src/main.c:24] expected[char] */
+@endcode
+
+*/
diff --git a/doc/tutorial_introduction.dox b/doc/tutorial_introduction.dox
new file mode 100644
index 0000000..418c38b
--- /dev/null
+++ b/doc/tutorial_introduction.dox
@@ -0,0 +1,45 @@
+/**
+@page libtalloc_tutorial The Tutorial
+@section introduction Introduction
+
+Talloc is a hierarchical, reference counted memory pool system with destructors.
+It is built atop the C standard library, and it defines a set of utility
+functions that altogether simplify the allocation and deallocation of data,
+especially for complex structures that contain many dynamically allocated
+elements such as strings and arrays.
+
+The main goals of this library are: removing the need to create a cleanup
+function for every complex structure, providing a logical organization of
+allocated memory blocks, and reducing the likelihood of memory leaks in
+long-running applications. All of this is achieved by allocating memory in a
+hierarchical structure of talloc contexts such that deallocating one context
+recursively frees all of its descendants as well.
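+
+As a minimal sketch of this idea (the variable names are illustrative):
+
+@code
+TALLOC_CTX *ctx = talloc_new(NULL);
+
+char *name = talloc_strdup(ctx, "example");
+int *values = talloc_array(ctx, int, 16);
+
+/* a single call frees ctx, name and values */
+talloc_free(ctx);
+@endcode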
+
+@section main-features Main features
+- An open source project
+- A hierarchical memory model
+- Natural projection of data structures into the memory space
+- Simplifies memory management of large data structures
+- Automatic execution of a destructor before the memory is freed
+- Simulates a dynamic type system
+- Implements a transparent memory pool
+
+@section toc Table of contents:
+
+@subpage libtalloc_context
+
+@subpage libtalloc_stealing
+
+@subpage libtalloc_dts
+
+@subpage libtalloc_destructors
+
+@subpage libtalloc_pools
+
+@subpage libtalloc_debugging
+
+@subpage libtalloc_bestpractices
+
+@subpage libtalloc_threads
+
+*/
diff --git a/doc/tutorial_pools.dox b/doc/tutorial_pools.dox
new file mode 100644
index 0000000..a0d1e1a
--- /dev/null
+++ b/doc/tutorial_pools.dox
@@ -0,0 +1,93 @@
+/**
+@page libtalloc_pools Chapter 5: Memory pools
+
+@section pools Memory pools
+
+Allocating new memory is an expensive operation, and large programs can
+contain thousands of malloc() calls for a single computation, where every call
+allocates only a very small amount of memory. This can result in an
+undesirable slowdown of the application. We can avoid this slowdown by using a
+memory pool to decrease the number of malloc() calls.
+
+A memory pool is a preallocated memory space with a fixed size. If we need to
+allocate new data, we take the desired amount of memory from the pool
+instead of requesting new memory from the system. This is done by creating a
+pointer that points inside the preallocated memory. Such a pool must not be
+reallocated as it would change its location - pointers that were pointing
+inside the pool would become invalid. Therefore, a memory pool requires a very
+good estimate of the required memory space.
+
+The talloc library contains its own implementation of a memory pool. It is
+highly transparent to the programmer. The only thing that needs to be done is
+to initialize a new pool context using talloc_pool(), which can then be used
+in the same way as any other context.
+
+Refactoring existing code (that uses talloc) to take advantage of a memory
+pool is quite simple due to the following properties of the pool context:
+
+- if we are allocating data on a pool context, it takes the desired
+  amount of memory from the pool,
+- if the context is a descendant of the pool context, it takes the space
+  from the pool as well,
+- if the pool does not have a sufficient amount of memory left, it will
+  create a new non-pool context, leaving the pool intact
+
+@code
+/* allocate 1KiB in a pool */
+TALLOC_CTX *pool_ctx = talloc_pool(NULL, 1024);
+
+/* Take 512B from the pool, 512B is left there */
+void *ptr = talloc_size(pool_ctx, 512);
+
+/* 1024B > 512B, this will create a new talloc chunk outside
+   the pool */
+void *ptr2 = talloc_size(ptr, 1024);
+
+/* The pool still contains 512 free bytes;
+ * this will take 200B from them. */
+void *ptr3 = talloc_size(ptr, 200);
+
+/* This will destroy context 'ptr3' but the memory
+ * is not freed, the available space in the pool
+ * will increase to 512B. */
+talloc_free(ptr3);
+
+/* This will free memory taken by 'pool_ctx'
+ * and 'ptr2' as well. */
+talloc_free(pool_ctx);
+@endcode
+
+All of the above is very convenient, but there is one big issue to keep in
+mind. If the parent of a talloc pool child is changed to a parent that is
+outside of this pool, the whole pool memory will not be freed until the child is
+freed.
+For this reason, we must be very careful when stealing a descendant of a
+pool context.
+
+@code
+TALLOC_CTX *mem_ctx = talloc_new(NULL);
+TALLOC_CTX *pool_ctx = talloc_pool(NULL, 1024);
+struct foo *foo = talloc(pool_ctx, struct foo);
+
+/* mem_ctx is not in the pool */
+talloc_steal(mem_ctx, foo);
+
+/* pool_ctx is marked as freed but the memory is not
+   deallocated, accessing the pool_ctx again will cause
+   an error */
+talloc_free(pool_ctx);
+
+/* This deallocates the pool_ctx. */
+talloc_free(mem_ctx);
+@endcode
+
+It may often be better to copy the memory we want instead of stealing it to
+avoid this problem. If we do not need to retain the context name (to keep the
+type information), we can use talloc_memdup() to do this.
+
+Copying the memory out of the pool may, however, discard all the performance
+boost given by the pool, depending on the size of the copied memory. Therefore,
+the code should be well profiled before taking this path. In general, the
+golden rule is: if we need to steal from the pool context, we should not
+use a pool context.
+
+*/
diff --git a/doc/tutorial_stealing.dox b/doc/tutorial_stealing.dox
new file mode 100644
index 0000000..67eae1d
--- /dev/null
+++ b/doc/tutorial_stealing.dox
@@ -0,0 +1,55 @@
+/**
+@page libtalloc_stealing Chapter 2: Stealing a context
+
+@section stealing Stealing a context
+
+Talloc has the ability to change the parent of a talloc context to another
+one. This operation is commonly referred to as stealing, and it is one of
+the most important actions performed with talloc contexts.
+
+Stealing a context is necessary if we want the pointer to outlive the context
+it was created on. This has many possible use cases: for instance, stealing
+the result of a database search onto an in-memory cache context, or changing
+the parent of a field of a generic structure to a more specific one (or
+vice versa). The most common scenario, at least in Samba, is to steal output
+data from a function-specific context to the output context given as an
+argument of that function.
+
+@code
+struct foo {
+    char *a1;
+    char *a2;
+    char *a3;
+};
+
+struct bar {
+    char *wurst;
+    struct foo *foo;
+};
+
+struct foo *foo = talloc_zero(ctx, struct foo);
+foo->a1 = talloc_strdup(foo, "a1");
+foo->a2 = talloc_strdup(foo, "a2");
+foo->a3 = talloc_strdup(foo, "a3");
+
+struct bar *bar = talloc_zero(NULL, struct bar);
+/* change parent of foo from ctx to bar */
+bar->foo = talloc_steal(bar, foo);
+
+/* or do the same but assign foo = NULL */
+bar->foo = talloc_move(bar, &foo);
+@endcode
+
+The talloc_move() function is similar to the talloc_steal() function but
+additionally sets the source pointer to NULL.
+
+In general, the source pointer itself is not changed by talloc_steal() (it
+only replaces the parent in the meta data). But the common usage is that the
+result is assigned to another variable, so further access to the pointer
+through the original variable should be avoided unless it is necessary. In
+such cases, talloc_move() is the preferred way of stealing a context: it
+additionally sets the source pointer to NULL, which protects the pointer from
+being accidentally freed and accessed through the old variable after its
+parent has been changed.
+
+@image html stealing.png
+
+*/
diff --git a/doc/tutorial_threads.dox b/doc/tutorial_threads.dox
new file mode 100644
index 0000000..111bbf5
--- /dev/null
+++ b/doc/tutorial_threads.dox
@@ -0,0 +1,203 @@
+/**
+@page libtalloc_threads Chapter 8: Using threads with talloc
+
+@section thread-safety Talloc and thread safety
+
+The talloc library is not internally thread-safe, in that accesses
+to variables on a talloc context are not controlled by mutexes or
+other thread synchronization primitives.
+
+However, so long as talloc_disable_null_tracking() is called from
+the main thread to disable global variable access within talloc,
+each thread can safely use its own top level talloc context
+allocated off the NULL context.
+
+For example:
+
+@code
+static void *thread_fn(void *arg)
+{
+	const char *ctx_name = (const char *)arg;
+	void *sub_ctx = NULL;
+	/*
+	 * Create a new top level talloc hierarchy in
+	 * this thread.
+	 */
+	void *top_ctx = talloc_named_const(NULL, 0, "top");
+	if (top_ctx == NULL) {
+		return NULL;
+	}
+	sub_ctx = talloc_named_const(top_ctx, 100, ctx_name);
+	if (sub_ctx == NULL) {
+		return NULL;
+	}
+
+	/*
+	 * Do more processing/talloc calls on top_ctx
+	 * and its children.
+	 */
+	......
+
+	talloc_free(top_ctx);
+	return NULL;
+}
+@endcode
+
+is a perfectly safe use of talloc within a thread.
+
+The problem comes when one thread wishes to move some
+memory allocated on its local top level talloc context
+to another thread. Care must be taken to add data access
+exclusion to prevent memory corruption. One method would
+be to lock a mutex before any talloc call on each thread,
+but this would push the burden of total talloc thread-safety
+onto the poor user of the library.
+
+A much easier way to transfer talloc'ed memory between
+threads is by the use of an intermediate, mutex-locked
+variable.
+
+An example of this is below - taken from test code inside
+the talloc testsuite.
+
+The main thread creates 1000 sub-threads, and then accepts
+the transfer of some thread-talloc'ed memory onto its top
+level context from each thread in turn.
+
+A pthread mutex and condition variable are used to
+synchronize the transfer via the intermediate_ptr
+variable.
+
+@code
+/* Required sync variables. */
+static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER;
+
+/* Intermediate talloc pointer for transfer. */
+static void *intermediate_ptr;
+
+/* Subthread. */
+static void *thread_fn(void *arg)
+{
+	int ret;
+	const char *ctx_name = (const char *)arg;
+	void *sub_ctx = NULL;
+	/*
+	 * Do stuff that creates a new talloc hierarchy in
+	 * this thread.
+	 */
+	void *top_ctx = talloc_named_const(NULL, 0, "top");
+	if (top_ctx == NULL) {
+		return NULL;
+	}
+	sub_ctx = talloc_named_const(top_ctx, 100, ctx_name);
+	if (sub_ctx == NULL) {
+		return NULL;
+	}
+
+	/*
+	 * Now transfer a pointer from our hierarchy
+	 * onto the intermediate ptr.
+	 */
+	ret = pthread_mutex_lock(&mtx);
+	if (ret != 0) {
+		talloc_free(top_ctx);
+		return NULL;
+	}
+
+	/* Wait for intermediate_ptr to be free. */
+	while (intermediate_ptr != NULL) {
+		ret = pthread_cond_wait(&condvar, &mtx);
+		if (ret != 0) {
+			talloc_free(top_ctx);
+			return NULL;
+		}
+	}
+
+	/* and move our memory onto it from our toplevel hierarchy. */
+	intermediate_ptr = talloc_move(NULL, &sub_ctx);
+
+	/* Tell the main thread it's ready for pickup. */
+	pthread_cond_broadcast(&condvar);
+	pthread_mutex_unlock(&mtx);
+
+	talloc_free(top_ctx);
+	return NULL;
+}
+
+/* Main thread.
*/ + +#define NUM_THREADS 1000 + +static bool test_pthread_talloc_passing(void) +{ + int i; + int ret; + char str_array[NUM_THREADS][20]; + pthread_t thread_id; + void *mem_ctx; + + /* + * Important ! Null tracking breaks threaded talloc. + * It *must* be turned off. + */ + talloc_disable_null_tracking(); + + /* Main thread toplevel context. */ + mem_ctx = talloc_named_const(NULL, 0, "toplevel"); + if (mem_ctx == NULL) { + return false; + } + + /* + * Spin off NUM_THREADS threads. + * They will use their own toplevel contexts. + */ + for (i = 0; i < NUM_THREADS; i++) { + (void)snprintf(str_array[i], + 20, + "thread:%d", + i); + if (str_array[i] == NULL) { + return false; + } + ret = pthread_create(&thread_id, + NULL, + thread_fn, + str_array[i]); + if (ret != 0) { + return false; + } + } + + /* Now wait for NUM_THREADS transfers of the talloc'ed memory. */ + for (i = 0; i < NUM_THREADS; i++) { + ret = pthread_mutex_lock(&mtx); + if (ret != 0) { + talloc_free(mem_ctx); + return false; + } + + /* Wait for intermediate_ptr to have our data. */ + while (intermediate_ptr == NULL) { + ret = pthread_cond_wait(&condvar, &mtx); + if (ret != 0) { + talloc_free(mem_ctx); + return false; + } + } + + /* and move it onto our toplevel hierarchy. */ + (void)talloc_move(mem_ctx, &intermediate_ptr); + + /* Tell the sub-threads we're ready for another. */ + pthread_cond_broadcast(&condvar); + pthread_mutex_unlock(&mtx); + } + + /* Dump the hierarchy. */ + talloc_report(mem_ctx, stdout); + talloc_free(mem_ctx); + return true; +} +@endcode +*/ diff --git a/doxy.config b/doxy.config new file mode 100644 index 0000000..0e27d61 --- /dev/null +++ b/doxy.config @@ -0,0 +1,1807 @@ +# Doxyfile 1.8.0 + +# This file describes the settings to be used by the documentation system +# doxygen (www.doxygen.org) for a project. +# +# All text after a hash (#) is considered a comment and will be ignored. +# The format is: +# TAG = value [value, ...] +# For lists items can also be appended using: +# TAG += value [value, ...] +# Values that contain spaces should be placed between quotes (" "). + +#--------------------------------------------------------------------------- +# Project related configuration options +#--------------------------------------------------------------------------- + +# This tag specifies the encoding used for all characters in the config file +# that follow. The default is UTF-8 which is also the encoding used for all +# text before the first occurrence of this tag. Doxygen uses libiconv (or the +# iconv built into libc) for the transcoding. See +# http://www.gnu.org/software/libiconv for the list of possible encodings. + +DOXYFILE_ENCODING = UTF-8 + +# The PROJECT_NAME tag is a single word (or sequence of words) that should +# identify the project. Note that if you do not use Doxywizard you need +# to put quotes around the project name if it contains spaces. + +PROJECT_NAME = talloc + +# The PROJECT_NUMBER tag can be used to enter a project or revision number. +# This could be handy for archiving the generated documentation or +# if some version control system is used. + +PROJECT_NUMBER = 2.0 + +# Using the PROJECT_BRIEF tag one can provide an optional one line description +# for a project that appears at the top of each page and should give viewer +# a quick idea about the purpose of the project. Keep the description short. + +PROJECT_BRIEF = + +# With the PROJECT_LOGO tag one can specify an logo or icon that is +# included in the documentation. 
The maximum height of the logo should not +# exceed 55 pixels and the maximum width should not exceed 200 pixels. +# Doxygen will copy the logo to the output directory. + +PROJECT_LOGO = + +# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) +# base path where the generated documentation will be put. +# If a relative path is entered, it will be relative to the location +# where doxygen was started. If left blank the current directory will be used. + +OUTPUT_DIRECTORY = doc + +# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create +# 4096 sub-directories (in 2 levels) under the output directory of each output +# format and will distribute the generated files over these directories. +# Enabling this option can be useful when feeding doxygen a huge amount of +# source files, where putting all generated files in the same directory would +# otherwise cause performance problems for the file system. + +CREATE_SUBDIRS = NO + +# The OUTPUT_LANGUAGE tag is used to specify the language in which all +# documentation generated by doxygen is written. Doxygen will use this +# information to generate all constant output in the proper language. +# The default language is English, other supported languages are: +# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional, +# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German, +# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English +# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian, +# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak, +# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese. + +OUTPUT_LANGUAGE = English + +# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will +# include brief member descriptions after the members that are listed in +# the file and class documentation (similar to JavaDoc). +# Set to NO to disable this. + +BRIEF_MEMBER_DESC = YES + +# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend +# the brief description of a member or function before the detailed description. +# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the +# brief descriptions will be completely suppressed. + +REPEAT_BRIEF = YES + +# This tag implements a quasi-intelligent brief description abbreviator +# that is used to form the text in various listings. Each string +# in this list, if found as the leading text of the brief description, will be +# stripped from the text and the result after processing the whole list, is +# used as the annotated text. Otherwise, the brief description is used as-is. +# If left blank, the following values are used ("$name" is automatically +# replaced with the name of the entity): "The $name class" "The $name widget" +# "The $name file" "is" "provides" "specifies" "contains" +# "represents" "a" "an" "the" + +ABBREVIATE_BRIEF = "The $name class" \ + "The $name widget" \ + "The $name file" \ + is \ + provides \ + specifies \ + contains \ + represents \ + a \ + an \ + the + +# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then +# Doxygen will generate a detailed section even if there is only a brief +# description. + +ALWAYS_DETAILED_SEC = NO + +# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all +# inherited members of a class in the documentation of that class as if those +# members were ordinary class members. 
Constructors, destructors and assignment +# operators of the base classes will not be shown. + +INLINE_INHERITED_MEMB = NO + +# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full +# path before files name in the file list and in the header files. If set +# to NO the shortest path that makes the file name unique will be used. + +FULL_PATH_NAMES = YES + +# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag +# can be used to strip a user-defined part of the path. Stripping is +# only done if one of the specified strings matches the left-hand part of +# the path. The tag can be used to show relative paths in the file list. +# If left blank the directory from which doxygen is run is used as the +# path to strip. + +STRIP_FROM_PATH = + +# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of +# the path mentioned in the documentation of a class, which tells +# the reader which header file to include in order to use a class. +# If left blank only the name of the header file containing the class +# definition is used. Otherwise one should specify the include paths that +# are normally passed to the compiler using the -I flag. + +STRIP_FROM_INC_PATH = + +# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter +# (but less readable) file names. This can be useful if your file system +# doesn't support long names like on DOS, Mac, or CD-ROM. + +SHORT_NAMES = NO + +# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen +# will interpret the first line (until the first dot) of a JavaDoc-style +# comment as the brief description. If set to NO, the JavaDoc +# comments will behave just like regular Qt-style comments +# (thus requiring an explicit @brief command for a brief description.) + +JAVADOC_AUTOBRIEF = YES + +# If the QT_AUTOBRIEF tag is set to YES then Doxygen will +# interpret the first line (until the first dot) of a Qt-style +# comment as the brief description. If set to NO, the comments +# will behave just like regular Qt-style comments (thus requiring +# an explicit \brief command for a brief description.) + +QT_AUTOBRIEF = NO + +# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen +# treat a multi-line C++ special comment block (i.e. a block of //! or /// +# comments) as a brief description. This used to be the default behaviour. +# The new default is to treat a multi-line C++ comment block as a detailed +# description. Set this tag to YES if you prefer the old behaviour instead. + +MULTILINE_CPP_IS_BRIEF = NO + +# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented +# member inherits the documentation from any documented member that it +# re-implements. + +INHERIT_DOCS = YES + +# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce +# a new page for each member. If set to NO, the documentation of a member will +# be part of the file/class/namespace that contains it. + +SEPARATE_MEMBER_PAGES = NO + +# The TAB_SIZE tag can be used to set the number of spaces in a tab. +# Doxygen uses this value to replace tabs by spaces in code fragments. + +TAB_SIZE = 8 + +# This tag can be used to specify a number of aliases that acts +# as commands in the documentation. An alias has the form "name=value". +# For example adding "sideeffect=\par Side Effects:\n" will allow you to +# put the command \sideeffect (or @sideeffect) in the documentation, which +# will result in a user-defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines. + +ALIASES = + +# This tag can be used to specify a number of word-keyword mappings (TCL only). +# A mapping has the form "name=value". For example adding +# "class=itcl::class" will allow you to use the command class in the +# itcl::class meaning. + +TCL_SUBST = + +# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C +# sources only. Doxygen will then generate output that is more tailored for C. +# For instance, some of the names that are used will be different. The list +# of all members will be omitted, etc. + +OPTIMIZE_OUTPUT_FOR_C = YES + +# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java +# sources only. Doxygen will then generate output that is more tailored for +# Java. For instance, namespaces will be presented as packages, qualified +# scopes will look different, etc. + +OPTIMIZE_OUTPUT_JAVA = NO + +# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran +# sources only. Doxygen will then generate output that is more tailored for +# Fortran. + +OPTIMIZE_FOR_FORTRAN = NO + +# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL +# sources. Doxygen will then generate output that is tailored for +# VHDL. + +OPTIMIZE_OUTPUT_VHDL = NO + +# Doxygen selects the parser to use depending on the extension of the files it +# parses. With this tag you can assign which parser to use for a given extension. +# Doxygen has a built-in mapping, but you can override or extend it using this +# tag. The format is ext=language, where ext is a file extension, and language +# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C, +# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make +# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C +# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions +# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen. + +EXTENSION_MAPPING = + +# If MARKDOWN_SUPPORT is enabled (the default) then doxygen pre-processes all +# comments according to the Markdown format, which allows for more readable +# documentation. See http://daringfireball.net/projects/markdown/ for details. +# The output of markdown processing is further processed by doxygen, so you +# can mix doxygen, HTML, and XML commands with Markdown formatting. +# Disable only in case of backward compatibilities issues. + +MARKDOWN_SUPPORT = YES + +# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want +# to include (a tag file for) the STL sources as input, then you should +# set this tag to YES in order to let doxygen match functions declarations and +# definitions whose arguments contain STL classes (e.g. func(std::string); v.s. +# func(std::string) {}). This also makes the inheritance and collaboration +# diagrams that involve STL classes more complete and accurate. + +BUILTIN_STL_SUPPORT = NO + +# If you use Microsoft's C++/CLI language, you should set this option to YES to +# enable parsing support. + +CPP_CLI_SUPPORT = NO + +# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only. +# Doxygen will parse them like normal C++ but will assume all classes use public +# instead of private inheritance when no explicit protection keyword is present. + +SIP_SUPPORT = NO + +# For Microsoft's IDL there are propget and propput attributes to indicate getter +# and setter methods for a property. 
Setting this option to YES (the default) +# will make doxygen replace the get and set methods by a property in the +# documentation. This will only work if the methods are indeed getting or +# setting a simple type. If this is not the case, or you want to show the +# methods anyway, you should set this option to NO. + +IDL_PROPERTY_SUPPORT = YES + +# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC +# tag is set to YES, then doxygen will reuse the documentation of the first +# member in the group (if any) for the other members of the group. By default +# all members of a group must be documented explicitly. + +DISTRIBUTE_GROUP_DOC = NO + +# Set the SUBGROUPING tag to YES (the default) to allow class member groups of +# the same type (for instance a group of public functions) to be put as a +# subgroup of that type (e.g. under the Public Functions section). Set it to +# NO to prevent subgrouping. Alternatively, this can be done per class using +# the \nosubgrouping command. + +SUBGROUPING = YES + +# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and +# unions are shown inside the group in which they are included (e.g. using +# @ingroup) instead of on a separate page (for HTML and Man pages) or +# section (for LaTeX and RTF). + +INLINE_GROUPED_CLASSES = NO + +# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and +# unions with only public data fields will be shown inline in the documentation +# of the scope in which they are defined (i.e. file, namespace, or group +# documentation), provided this scope is documented. If set to NO (the default), +# structs, classes, and unions are shown on a separate page (for HTML and Man +# pages) or section (for LaTeX and RTF). + +INLINE_SIMPLE_STRUCTS = NO + +# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum +# is documented as struct, union, or enum with the name of the typedef. So +# typedef struct TypeS {} TypeT, will appear in the documentation as a struct +# with name TypeT. When disabled the typedef will appear as a member of a file, +# namespace, or class. And the struct will be named TypeS. This can typically +# be useful for C code in case the coding convention dictates that all compound +# types are typedef'ed and only the typedef is referenced, never the tag name. + +TYPEDEF_HIDES_STRUCT = NO + +# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to +# determine which symbols to keep in memory and which to flush to disk. +# When the cache is full, less often used symbols will be written to disk. +# For small to medium size projects (<1000 input files) the default value is +# probably good enough. For larger projects a too small cache size can cause +# doxygen to be busy swapping symbols to and from disk most of the time +# causing a significant performance penalty. +# If the system has enough physical memory increasing the cache will improve the +# performance by keeping more symbols in memory. Note that the value works on +# a logarithmic scale so increasing the size by one will roughly double the +# memory usage. The cache size is given by this formula: +# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. + +SYMBOL_CACHE_SIZE = 0 + +# Similar to the SYMBOL_CACHE_SIZE the size of the symbol lookup cache can be +# set using LOOKUP_CACHE_SIZE. This cache is used to resolve symbols given +# their name and scope. 
Since this can be an expensive process and often the +# same symbol appear multiple times in the code, doxygen keeps a cache of +# pre-resolved symbols. If the cache is too small doxygen will become slower. +# If the cache is too large, memory is wasted. The cache size is given by this +# formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range is 0..9, the default is 0, +# corresponding to a cache size of 2^16 = 65536 symbols. + +LOOKUP_CACHE_SIZE = 0 + +#--------------------------------------------------------------------------- +# Build related configuration options +#--------------------------------------------------------------------------- + +# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# documentation are documented, even if no documentation was available. +# Private class members and static file members will be hidden unless +# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES + +EXTRACT_ALL = NO + +# If the EXTRACT_PRIVATE tag is set to YES all private members of a class +# will be included in the documentation. + +EXTRACT_PRIVATE = NO + +# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal scope will be included in the documentation. + +EXTRACT_PACKAGE = NO + +# If the EXTRACT_STATIC tag is set to YES all static members of a file +# will be included in the documentation. + +EXTRACT_STATIC = NO + +# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) +# defined locally in source files will be included in the documentation. +# If set to NO only classes defined in header files are included. + +EXTRACT_LOCAL_CLASSES = NO + +# This flag is only useful for Objective-C code. When set to YES local +# methods, which are defined in the implementation section but not in +# the interface are included in the documentation. +# If set to NO (the default) only methods in the interface are included. + +EXTRACT_LOCAL_METHODS = NO + +# If this flag is set to YES, the members of anonymous namespaces will be +# extracted and appear in the documentation as a namespace called +# 'anonymous_namespace{file}', where file will be replaced with the base +# name of the file that contains the anonymous namespace. By default +# anonymous namespaces are hidden. + +EXTRACT_ANON_NSPACES = NO + +# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all +# undocumented members of documented classes, files or namespaces. +# If set to NO (the default) these members will be included in the +# various overviews, but no documentation section is generated. +# This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_MEMBERS = YES + +# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all +# undocumented classes that are normally visible in the class hierarchy. +# If set to NO (the default) these classes will be included in the various +# overviews. This option has no effect if EXTRACT_ALL is enabled. + +HIDE_UNDOC_CLASSES = YES + +# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all +# friend (class|struct|union) declarations. +# If set to NO (the default) these declarations will be included in the +# documentation. + +HIDE_FRIEND_COMPOUNDS = NO + +# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any +# documentation blocks found inside the body of a function. +# If set to NO (the default) these blocks will be appended to the +# function's detailed documentation block. 
+ +HIDE_IN_BODY_DOCS = NO + +# The INTERNAL_DOCS tag determines if documentation +# that is typed after a \internal command is included. If the tag is set +# to NO (the default) then the documentation will be excluded. +# Set it to YES to include the internal documentation. + +INTERNAL_DOCS = NO + +# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate +# file names in lower-case letters. If set to YES upper-case letters are also +# allowed. This is useful if you have classes or files whose names only differ +# in case and if your file system supports case sensitive file names. Windows +# and Mac users are advised to set this option to NO. + +CASE_SENSE_NAMES = YES + +# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen +# will show members with their full class and namespace scopes in the +# documentation. If set to YES the scope will be hidden. + +HIDE_SCOPE_NAMES = NO + +# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen +# will put a list of the files that are included by a file in the documentation +# of that file. + +SHOW_INCLUDE_FILES = YES + +# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen +# will list include files with double quotes in the documentation +# rather than with sharp brackets. + +FORCE_LOCAL_INCLUDES = NO + +# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] +# is inserted in the documentation for inline members. + +INLINE_INFO = YES + +# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen +# will sort the (detailed) documentation of file and class members +# alphabetically by member name. If set to NO the members will appear in +# declaration order. + +SORT_MEMBER_DOCS = YES + +# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the +# brief documentation of file, namespace and class members alphabetically +# by member name. If set to NO (the default) the members will appear in +# declaration order. + +SORT_BRIEF_DOCS = NO + +# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen +# will sort the (brief and detailed) documentation of class members so that +# constructors and destructors are listed first. If set to NO (the default) +# the constructors will appear in the respective orders defined by +# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS. +# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO +# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO. + +SORT_MEMBERS_CTORS_1ST = NO + +# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the +# hierarchy of group names into alphabetical order. If set to NO (the default) +# the group names will appear in their defined order. + +SORT_GROUP_NAMES = NO + +# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be +# sorted by fully-qualified names, including namespaces. If set to +# NO (the default), the class list will be sorted only by class name, +# not including the namespace part. +# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES. +# Note: This option applies only to the class list, not to the +# alphabetical list. + +SORT_BY_SCOPE_NAME = NO + +# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to +# do proper type resolution of all parameters of a function it will reject a +# match between the prototype and the implementation of a member function even +# if there is only one candidate or it is obvious which candidate to choose +# by doing a simple string match. 
By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files
+# containing the references data. This must be a list of .bib files.
The +# .bib extension is automatically appended if omitted. Using this command +# requires the bibtex tool to be installed. See also +# http://en.wikipedia.org/wiki/BibTeX for more info. For LaTeX the style +# of the bibliography can be controlled using LATEX_BIB_STYLE. To use this +# feature you need bibtex and perl available in the search path. + +CITE_BIB_FILES = + +#--------------------------------------------------------------------------- +# configuration options related to warning and progress messages +#--------------------------------------------------------------------------- + +# The QUIET tag can be used to turn on/off the messages that are generated +# by doxygen. Possible values are YES and NO. If left blank NO is used. + +QUIET = NO + +# The WARNINGS tag can be used to turn on/off the warning messages that are +# generated by doxygen. Possible values are YES and NO. If left blank +# NO is used. + +WARNINGS = YES + +# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings +# for undocumented members. If EXTRACT_ALL is set to YES then this flag will +# automatically be disabled. + +WARN_IF_UNDOCUMENTED = YES + +# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for +# potential errors in the documentation, such as not documenting some +# parameters in a documented function, or documenting parameters that +# don't exist or using markup commands wrongly. + +WARN_IF_DOC_ERROR = YES + +# The WARN_NO_PARAMDOC option can be enabled to get warnings for +# functions that are documented, but have no documentation for their parameters +# or return value. If set to NO (the default) doxygen will only warn about +# wrong or incomplete parameter documentation, but not about the absence of +# documentation. + +WARN_NO_PARAMDOC = NO + +# The WARN_FORMAT tag determines the format of the warning messages that +# doxygen can produce. The string should contain the $file, $line, and $text +# tags, which will be replaced by the file and line number from which the +# warning originated and the warning text. Optionally the format may contain +# $version, which will be replaced by the version of the file (if it could +# be obtained via FILE_VERSION_FILTER) + +WARN_FORMAT = "$file:$line: $text" + +# The WARN_LOGFILE tag can be used to specify a file to which warning +# and error messages should be written. If left blank the output is written +# to stderr. + +WARN_LOGFILE = + +#--------------------------------------------------------------------------- +# configuration options related to the input files +#--------------------------------------------------------------------------- + +# The INPUT tag can be used to specify the files and/or directories that contain +# documented source files. You may enter file names like "myfile.cpp" or +# directories like "/usr/src/myproject". Separate the files or directories +# with spaces. + +INPUT = . \ + doc + +# This tag can be used to specify the character encoding of the source files +# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is +# also the default input encoding. Doxygen uses libiconv (or the iconv built +# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for +# the list of possible encodings. + +INPUT_ENCODING = UTF-8 + +# If the value of the INPUT tag contains directories, you can use the +# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. 
If left +# blank the following patterns are tested: +# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh +# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py +# *.f90 *.f *.for *.vhd *.vhdl + +FILE_PATTERNS = *.cpp \ + *.cc \ + *.c \ + *.h \ + *.hh \ + *.hpp \ + *.dox + +# The RECURSIVE tag can be used to turn specify whether or not subdirectories +# should be searched for input files as well. Possible values are YES and NO. +# If left blank NO is used. + +RECURSIVE = NO + +# The EXCLUDE tag can be used to specify files and/or directories that should be +# excluded from the INPUT source files. This way you can easily exclude a +# subdirectory from a directory tree whose root is specified with the INPUT tag. +# Note that relative paths are relative to the directory from which doxygen is +# run. + +EXCLUDE = + +# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or +# directories that are symbolic links (a Unix file system feature) are excluded +# from the input. + +EXCLUDE_SYMLINKS = NO + +# If the value of the INPUT tag contains directories, you can use the +# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude +# certain files from those directories. Note that the wildcards are matched +# against the file with absolute path, so to exclude all test directories +# for example use the pattern */test/* + +EXCLUDE_PATTERNS = */.git/* + +# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names +# (namespaces, classes, functions, etc.) that should be excluded from the +# output. The symbol name can be a fully qualified name, a word, or if the +# wildcard * is used, a substring. Examples: ANamespace, AClass, +# AClass::ANamespace, ANamespace::*Test + +EXCLUDE_SYMBOLS = + +# The EXAMPLE_PATH tag can be used to specify one or more files or +# directories that contain example code fragments that are included (see +# the \include command). + +EXAMPLE_PATH = + +# If the value of the EXAMPLE_PATH tag contains directories, you can use the +# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp +# and *.h) to filter out the source-files in the directories. If left +# blank all files are included. + +EXAMPLE_PATTERNS = + +# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be +# searched for input files to be used with the \include or \dontinclude +# commands irrespective of the value of the RECURSIVE tag. +# Possible values are YES and NO. If left blank NO is used. + +EXAMPLE_RECURSIVE = NO + +# The IMAGE_PATH tag can be used to specify one or more files or +# directories that contain image that are included in the documentation (see +# the \image command). + +IMAGE_PATH = doc + +# The INPUT_FILTER tag can be used to specify a program that doxygen should +# invoke to filter for each input file. Doxygen will invoke the filter program +# by executing (via popen()) the command <filter> <input-file>, where <filter> +# is the value of the INPUT_FILTER tag, and <input-file> is the name of an +# input file. Doxygen will then use the output that the filter program writes +# to standard output. +# If FILTER_PATTERNS is specified, this tag will be +# ignored. + +INPUT_FILTER = + +# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern +# basis. +# Doxygen will compare the file name with each pattern and apply the +# filter if there is a match. +# The filters are a list of the form: +# pattern=filter (like *.cpp=my_cpp_filter). 
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# none of the patterns matches the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+ +ALPHABETICAL_INDEX = NO + +# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then +# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns +# in which this list will be split (can be a number in the range [1..20]) + +COLS_IN_ALPHA_INDEX = 5 + +# In case all classes in a project start with a common prefix, all +# classes will be put under the same header in the alphabetical index. +# The IGNORE_PREFIX tag can be used to specify one or more prefixes that +# should be ignored while generating the index headers. + +IGNORE_PREFIX = + +#--------------------------------------------------------------------------- +# configuration options related to the HTML output +#--------------------------------------------------------------------------- + +# If the GENERATE_HTML tag is set to YES (the default) Doxygen will +# generate HTML output. + +GENERATE_HTML = YES + +# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `html' will be used as the default path. + +HTML_OUTPUT = html + +# The HTML_FILE_EXTENSION tag can be used to specify the file extension for +# each generated HTML page (for example: .htm,.php,.asp). If it is left blank +# doxygen will generate files with .html extension. + +HTML_FILE_EXTENSION = .html + +# The HTML_HEADER tag can be used to specify a personal HTML header for +# each generated HTML page. If it is left blank doxygen will generate a +# standard header. Note that when using a custom header you are responsible +# for the proper inclusion of any scripts and style sheets that doxygen +# needs, which is dependent on the configuration options used. +# It is advised to generate a default header using "doxygen -w html +# header.html footer.html stylesheet.css YourConfigFile" and then modify +# that header. Note that the header is subject to change so you typically +# have to redo this when upgrading to a newer version of doxygen or when +# changing the value of configuration settings such as GENERATE_TREEVIEW! + +HTML_HEADER = + +# The HTML_FOOTER tag can be used to specify a personal HTML footer for +# each generated HTML page. If it is left blank doxygen will generate a +# standard footer. + +HTML_FOOTER = + +# The HTML_STYLESHEET tag can be used to specify a user-defined cascading +# style sheet that is used by each HTML page. It can be used to +# fine-tune the look of the HTML output. If the tag is left blank doxygen +# will generate a default style sheet. Note that doxygen will try to copy +# the style sheet file to the HTML output directory, so don't put your own +# style sheet in the HTML output directory as well, or it will be erased! + +HTML_STYLESHEET = + +# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or +# other source files which should be copied to the HTML output directory. Note +# that these files will be copied to the base HTML output directory. Use the +# $relpath$ marker in the HTML_HEADER and/or HTML_FOOTER files to load these +# files. In the HTML_STYLESHEET file, use the file name only. Also note that +# the files will be copied as-is; there are no commands or markers available. + +HTML_EXTRA_FILES = + +# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. +# Doxygen will adjust the colors in the style sheet and background images +# according to this color. 
Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8. The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = NO
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
+# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When the GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When the GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+ +DOCSET_PUBLISHER_NAME = Publisher + +# If the GENERATE_HTMLHELP tag is set to YES, additional index files +# will be generated that can be used as input for tools like the +# Microsoft HTML help workshop to generate a compiled HTML help file (.chm) +# of the generated HTML documentation. + +GENERATE_HTMLHELP = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can +# be used to specify the file name of the resulting .chm file. You +# can add a path in front of the file if the result should not be +# written to the html output directory. + +CHM_FILE = + +# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can +# be used to specify the location (absolute path including file name) of +# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run +# the HTML help compiler on the generated index.hhp. + +HHC_LOCATION = + +# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag +# controls if a separate .chi index file is generated (YES) or that +# it should be included in the master .chm file (NO). + +GENERATE_CHI = NO + +# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING +# is used to encode HtmlHelp index (hhk), content (hhc) and project file +# content. + +CHM_INDEX_ENCODING = + +# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag +# controls whether a binary table of contents is generated (YES) or a +# normal table of contents (NO) in the .chm file. + +BINARY_TOC = NO + +# The TOC_EXPAND flag can be set to YES to add extra items for group members +# to the contents of the HTML help documentation and to the tree view. + +TOC_EXPAND = NO + +# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and +# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated +# that can be used as input for Qt's qhelpgenerator to generate a +# Qt Compressed Help (.qch) of the generated HTML documentation. + +GENERATE_QHP = NO + +# If the QHG_LOCATION tag is specified, the QCH_FILE tag can +# be used to specify the file name of the resulting .qch file. +# The path specified is relative to the HTML output folder. + +QCH_FILE = + +# The QHP_NAMESPACE tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#namespace + +QHP_NAMESPACE = + +# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating +# Qt Help Project output. For more information please see +# http://doc.trolltech.com/qthelpproject.html#virtual-folders + +QHP_VIRTUAL_FOLDER = doc + +# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to +# add. For more information please see +# http://doc.trolltech.com/qthelpproject.html#custom-filters + +QHP_CUST_FILTER_NAME = + +# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the +# custom filter to add. For more information please see +# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters"> +# Qt Help Project / Custom Filters</a>. + +QHP_CUST_FILTER_ATTRS = + +# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this +# project's +# filter section matches. +# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes"> +# Qt Help Project / Filter Attributes</a>. + +QHP_SECT_FILTER_ATTRS = + +# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can +# be used to specify the location of Qt's qhelpgenerator. +# If non-empty doxygen will try to run qhelpgenerator on the generated +# .qhp file. 
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files need to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs)
+# at top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it. Since the tabs have the same information as the
+# navigation tree you can set this option to YES if you already set
+# GENERATE_TREEVIEW to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+# Since the tree basically has the same information as the tab index you
+# could consider setting DISABLE_INDEX to YES when enabling this option.
+
+GENERATE_TREEVIEW = NO
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values
+# (range [0,1..20]) that doxygen will group on one line in the generated HTML
+# documentation. Note that a value of 0 will completely suppress the enum
+# values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want the formulas to look prettier in the HTML
+# output. When enabled you may also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to
+# the MathJax Content Delivery Network so you can quickly see the result without
+# installing MathJax.
+# However, it is strongly recommended to install a local
+# copy of MathJax from http://www.mathjax.org before deployment.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax extension
+# names that should be enabled during MathJax rendering.
+
+MATHJAX_EXTENSIONS =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow; in that case enabling SERVER_BASED_SEARCH may provide a better
+# solution.
+
+SEARCHENGINE = NO
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to set up
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for
+# the generated latex document. The footer should contain everything after
+# the last chapter. If it is left blank doxygen will generate a
+# standard footer. Notice: only use this tag if you know what you are doing!
+
+LATEX_FOOTER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. The default style is "plain". See
+# http://en.wikipedia.org/wiki/BibTeX for more info.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other +# programs which support those fields. +# Note: wordpad (write) and others do not support links. + +RTF_HYPERLINKS = NO + +# Load style sheet definitions from file. Syntax is similar to doxygen's +# config file, i.e. a series of assignments. You only have to provide +# replacements, missing definitions are set to their default value. + +RTF_STYLESHEET_FILE = + +# Set optional variables used in the generation of an rtf document. +# Syntax is similar to doxygen's config file. + +RTF_EXTENSIONS_FILE = + +#--------------------------------------------------------------------------- +# configuration options related to the man page output +#--------------------------------------------------------------------------- + +# If the GENERATE_MAN tag is set to YES (the default) Doxygen will +# generate man pages + +GENERATE_MAN = YES + +# The MAN_OUTPUT tag is used to specify where the man pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `man' will be used as the default path. + +MAN_OUTPUT = man + +# The MAN_EXTENSION tag determines the extension that is added to +# the generated man pages (default is the subroutine's section .3) + +MAN_EXTENSION = .3 + +# If the MAN_LINKS tag is set to YES and Doxygen generates man output, +# then it will generate one additional man file for each entity +# documented in the real man page(s). These additional files +# only source the real man page, but without them the man command +# would be unable to find the correct page. The default is NO. + +MAN_LINKS = NO + +#--------------------------------------------------------------------------- +# configuration options related to the XML output +#--------------------------------------------------------------------------- + +# If the GENERATE_XML tag is set to YES Doxygen will +# generate an XML file that captures the structure of +# the code including all documentation. + +GENERATE_XML = NO + +# The XML_OUTPUT tag is used to specify where the XML pages will be put. +# If a relative path is entered the value of OUTPUT_DIRECTORY will be +# put in front of it. If left blank `xml' will be used as the default path. + +XML_OUTPUT = xml + +# The XML_SCHEMA tag can be used to specify an XML schema, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_SCHEMA = + +# The XML_DTD tag can be used to specify an XML DTD, +# which can be used by a validating XML parser to check the +# syntax of the XML files. + +XML_DTD = + +# If the XML_PROGRAMLISTING tag is set to YES Doxygen will +# dump the program listings (including syntax highlighting +# and cross-referencing information) to the XML output. Note that +# enabling this will significantly increase the size of the XML output. + +XML_PROGRAMLISTING = YES + +#--------------------------------------------------------------------------- +# configuration options for the AutoGen Definitions output +#--------------------------------------------------------------------------- + +# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will +# generate an AutoGen Definitions (see autogen.sf.net) file +# that captures the structure of the code including all +# documentation. Note that this feature is still experimental +# and incomplete at the moment. 
+ +GENERATE_AUTOGEN_DEF = NO + +#--------------------------------------------------------------------------- +# configuration options related to the Perl module output +#--------------------------------------------------------------------------- + +# If the GENERATE_PERLMOD tag is set to YES Doxygen will +# generate a Perl module file that captures the structure of +# the code including all documentation. Note that this +# feature is still experimental and incomplete at the +# moment. + +GENERATE_PERLMOD = NO + +# If the PERLMOD_LATEX tag is set to YES Doxygen will generate +# the necessary Makefile rules, Perl scripts and LaTeX code to be able +# to generate PDF and DVI output from the Perl module output. + +PERLMOD_LATEX = NO + +# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be +# nicely formatted so it can be parsed by a human reader. +# This is useful +# if you want to understand what is going on. +# On the other hand, if this +# tag is set to NO the size of the Perl module output will be much smaller +# and Perl will parse it just the same. + +PERLMOD_PRETTY = YES + +# The names of the make variables in the generated doxyrules.make file +# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. +# This is useful so different doxyrules.make files included by the same +# Makefile don't overwrite each other's variables. + +PERLMOD_MAKEVAR_PREFIX = + +#--------------------------------------------------------------------------- +# Configuration options related to the preprocessor +#--------------------------------------------------------------------------- + +# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will +# evaluate all C-preprocessor directives found in the sources and include +# files. + +ENABLE_PREPROCESSING = YES + +# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro +# names in the source code. If set to NO (the default) only conditional +# compilation will be performed. Macro expansion can be done in a controlled +# way by setting EXPAND_ONLY_PREDEF to YES. + +MACRO_EXPANSION = YES + +# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES +# then the macro expansion is limited to the macros specified with the +# PREDEFINED and EXPAND_AS_DEFINED tags. + +EXPAND_ONLY_PREDEF = YES + +# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files +# pointed to by INCLUDE_PATH will be searched when a #include is found. + +SEARCH_INCLUDES = YES + +# The INCLUDE_PATH tag can be used to specify one or more directories that +# contain include files that are not input files but should be processed by +# the preprocessor. + +INCLUDE_PATH = + +# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard +# patterns (like *.h and *.hpp) to filter out the header-files in the +# directories. If left blank, the patterns specified with FILE_PATTERNS will +# be used. + +INCLUDE_FILE_PATTERNS = + +# The PREDEFINED tag can be used to specify one or more macro names that +# are defined before the preprocessor is started (similar to the -D option of +# gcc). The argument of the tag is a list of macros of the form: name +# or name=definition (no spaces). If the definition and the = are +# omitted =1 is assumed. To prevent a macro definition from being +# undefined via #undef or recursively expanded use the := operator +# instead of the = operator. 
+
+PREDEFINED = DOXYGEN \
+ PRINTF_ATTRIBUTE(x,y)=
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition that
+# overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. For each
+# tag file the location of the external documentation should be added. The
+# format of a tag file without this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths
+# or URLs. Note that each tag file must have a unique name (where the name does
+# NOT include the path). If a tag file is not located in the directory in which
+# doxygen is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default).
+
+HAVE_DOT = NO
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will use the Helvetica font for all dot files that
+# doxygen generates. When you want a differently looking font you can specify
+# the font name using DOT_FONTNAME. You need to make sure dot is able to find
+# the font, which can be done by putting it in a standard location or by setting
+# the DOTFONTPATH environment variable or by setting DOT_FONTPATH to the
+# directory containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the Helvetica font.
+# If you specify a different font using DOT_FONTNAME you can use DOT_FONTPATH to
+# set the path where dot can find it.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force the
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class reference variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside
+# the class node. If there are many fields or methods and many nodes the
+# graph may become too big to be useful. The UML_LIMIT_NUM_FIELDS
+# threshold limits the number of items for each type to make the size more
+# manageable. Set this to 0 for no limit. Note that the threshold may be
+# exceeded by 50% before the limit is enforced.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are svg, png, jpg, or gif.
+# If left blank png will be used. If you choose svg you need to set
+# HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible in IE 9+ (other browsers do not have this requirement).
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+# Note that this requires a modern browser other than Internet Explorer.
+# Tested and working are Firefox, Chrome, Safari, and Opera. For IE 9+ you
+# need to set HTML_FILE_EXTENSION to xhtml in order to make the SVG files
+# visible. Older versions of IE do not have SVG support.
+
+INTERACTIVE_SVG = NO
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the
+# number of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+ +DOT_GRAPH_MAX_NODES = 50 + +# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the +# graphs generated by dot. A depth value of 3 means that only nodes reachable +# from the root by following a path via at most 3 edges will be shown. Nodes +# that lay further from the root node will be omitted. Note that setting this +# option to 1 or 2 may greatly reduce the computation time needed for large +# code bases. Also note that the size of a graph can be further restricted by +# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction. + +MAX_DOT_GRAPH_DEPTH = 0 + +# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent +# background. This is disabled by default, because dot on Windows does not +# seem to support this out of the box. Warning: Depending on the platform used, +# enabling this option may lead to badly anti-aliased labels on the edges of +# a graph (i.e. they become hard to read). + +DOT_TRANSPARENT = YES + +# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# files in one run (i.e. multiple -o and -T options on the command line). This +# makes dot run faster, but since only newer versions of dot (>1.8.10) +# support this, this feature is disabled by default. + +DOT_MULTI_TARGETS = NO + +# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will +# generate a legend page explaining the meaning of the various boxes and +# arrows in the dot generated graphs. + +GENERATE_LEGEND = YES + +# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will +# remove the intermediate dot files that are used to generate +# the various graphs. + +DOT_CLEANUP = YES diff --git a/lib/replace/.checker_innocent b/lib/replace/.checker_innocent new file mode 100644 index 0000000..e619176 --- /dev/null +++ b/lib/replace/.checker_innocent @@ -0,0 +1,4 @@ +>>>MISTAKE21_create_files_6a9e68ada99a97cb +>>>MISTAKE21_os2_delete_9b2bfa7f38711d09 +>>>MISTAKE21_os2_delete_2fcc29aaa99a97cb +>>>SECURITY2_os2_delete_9b2bfa7f1c9396ca diff --git a/lib/replace/Makefile b/lib/replace/Makefile new file mode 100644 index 0000000..a123a37 --- /dev/null +++ b/lib/replace/Makefile @@ -0,0 +1,64 @@ +# simple makefile wrapper to run waf + +WAF_BINARY=$(PYTHON) ../../buildtools/bin/waf +WAF=PYTHONHASHSEED=1 WAF_MAKE=1 $(WAF_BINARY) + +all: + $(WAF) build + +install: + $(WAF) install + +uninstall: + $(WAF) uninstall + +test: + $(WAF) test $(TEST_OPTIONS) + +testenv: + $(WAF) test --testenv $(TEST_OPTIONS) + +quicktest: + $(WAF) test --quick $(TEST_OPTIONS) + +dist: + touch .tmplock + WAFLOCK=.tmplock $(WAF) dist + +distcheck: + touch .tmplock + WAFLOCK=.tmplock $(WAF) distcheck + +clean: + $(WAF) clean + +distclean: + $(WAF) distclean + +reconfigure: configure + $(WAF) reconfigure + +show_waf_options: + $(WAF) --help + +# some compatibility make targets +everything: all + +testsuite: all + +check: test + +torture: all + +# this should do an install as well, once install is finished +installcheck: test + +etags: + $(WAF) etags + +ctags: + $(WAF) ctags + +bin/%:: FORCE + $(WAF) --targets=`basename $@` +FORCE: diff --git a/lib/replace/README b/lib/replace/README new file mode 100644 index 0000000..6612eab --- /dev/null +++ b/lib/replace/README @@ -0,0 +1,128 @@ +This subsystem ensures that we can always use a certain core set of +functions and types, that are either provided by the OS or by replacement +functions / definitions in this subsystem. The aim is to try to stick +to POSIX functions in here as much as possible. 
Convenience functions +that are available on no platform at all belong in other subsystems +(such as LIBUTIL). + +The following functions are guaranteed: + +ftruncate +strlcpy +strlcat +mktime +rename +initgroups +memmove +strdup +setlinebuf +vsyslog +timegm +setenv +unsetenv +strndup +strnlen +waitpid +seteuid +setegid +asprintf +snprintf +vasprintf +vsnprintf +opendir +readdir +telldir +seekdir +clock_gettime +closedir +dlopen +dlclose +dlsym +dlerror +chroot +bzero +strerror +errno +mkdtemp +mkstemp (a secure one!) +pread +pwrite +chown +lchown +readline (the library) +inet_ntoa +inet_ntop +inet_pton +inet_aton +strtoll +strtoull +socketpair +strptime +getaddrinfo +freeaddrinfo +getnameinfo +gai_strerror +getifaddrs +freeifaddrs +utime +utimes +dup2 +link +readlink +symlink +realpath +poll +setproctitle +memset_s + +Types: +bool +socklen_t +uint{8,16,32,64}_t +int{8,16,32,64}_t +intptr_t +sig_atomic_t +blksize_t +blkcnt_t + +Constants: +PATH_NAME_MAX +UINT{16,32,64}_MAX +INT32_MAX +RTLD_LAZY +HOST_NAME_MAX +UINT16_MAX +UINT32_MAX +UINT64_MAX +CHAR_BIT + +Macros: +va_copy +__FUNCTION__ +__FILE__ +__LINE__ +__LINESTR__ +__location__ +__STRING +__STRINGSTRING +MIN +MAX +QSORT_CAST +ZERO_STRUCT +ZERO_STRUCTP +ZERO_STRUCTPN +ZERO_ARRAY +ARRAY_SIZE +PTR_DIFF + +Headers: +stdint.h +stdbool.h + +Optional C keywords: +volatile + +Prerequisites: +memset (for bzero) +syslog (for vsyslog) +mktemp (for mkstemp and mkdtemp) diff --git a/lib/replace/closefrom.c b/lib/replace/closefrom.c new file mode 100644 index 0000000..bef53e4 --- /dev/null +++ b/lib/replace/closefrom.c @@ -0,0 +1,138 @@ +/* + * Unix SMB/CIFS implementation. + * Samba utility functions + * Copyright (C) Volker Lendecke 2016 + * + * ** NOTE! The following LGPL license applies to the replace + * ** library. This does NOT imply that all of Samba is released + * ** under the LGPL + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "replace.h" +#include <dirent.h> +#include <unistd.h> +#include <limits.h> + +static int closefrom_sysconf(int lower) +{ + long max_files, fd; + + max_files = sysconf(_SC_OPEN_MAX); + if (max_files == -1) { + max_files = 65536; + } + + for (fd=lower; fd<max_files; fd++) { + close(fd); + } + + return 0; +} + +static int closefrom_procfs(int lower) +{ + DIR *dirp; + int dir_fd; + struct dirent *dp; + int *fds = NULL; + size_t num_fds = 0; + size_t fd_array_size = 0; + size_t i; + int ret = ENOMEM; + + dirp = opendir("/proc/self/fd"); + if (dirp == NULL) { + return errno; + } + + dir_fd = dirfd(dirp); + if (dir_fd == -1) { + ret = errno; + goto fail; + } + + while ((dp = readdir(dirp)) != NULL) { + char *endptr; + unsigned long long fd; + + errno = 0; + + fd = strtoull(dp->d_name, &endptr, 10); + if ((fd == 0) && (errno == EINVAL)) { + continue; + } + if ((fd == ULLONG_MAX) && (errno == ERANGE)) { + continue; + } + if (*endptr != '\0') { + continue; + } + if (fd == dir_fd) { + continue; + } + if (fd > INT_MAX) { + continue; + } + if (fd < lower) { + continue; + } + + if (num_fds >= (fd_array_size / sizeof(int))) { + void *tmp; + + if (fd_array_size == 0) { + fd_array_size = 16 * sizeof(int); + } else { + if (fd_array_size + fd_array_size < + fd_array_size) { + /* overflow */ + goto fail; + } + fd_array_size = fd_array_size + fd_array_size; + } + + tmp = realloc(fds, fd_array_size); + if (tmp == NULL) { + goto fail; + } + fds = tmp; + } + + fds[num_fds++] = fd; + } + + for (i=0; i<num_fds; i++) { + close(fds[i]); + } + + ret = 0; +fail: + closedir(dirp); + free(fds); + return ret; +} + +int rep_closefrom(int lower) +{ + int ret; + + ret = closefrom_procfs(lower); + if (ret == 0) { + return 0; + } + + return closefrom_sysconf(lower); +} diff --git a/lib/replace/configure b/lib/replace/configure new file mode 100755 index 0000000..af76185 --- /dev/null +++ b/lib/replace/configure @@ -0,0 +1,28 @@ +#!/bin/sh + +PREVPATH=$(dirname $0) + +if [ -f $PREVPATH/../../buildtools/bin/waf ]; then + WAF=../../buildtools/bin/waf +elif [ -f $PREVPATH/buildtools/bin/waf ]; then + WAF=./buildtools/bin/waf +else + echo "replace: Unable to find waf" + exit 1 +fi + +# using JOBS=1 gives maximum compatibility with +# systems like AIX which have broken threading in python +JOBS=1 +export JOBS + +# Make sure we don't have any library preloaded. +unset LD_PRELOAD + +# Make sure we get stable hashes +PYTHONHASHSEED=1 +export PYTHONHASHSEED + +cd . || exit 1 +$PYTHON $WAF configure "$@" || exit 1 +cd $PREVPATH diff --git a/lib/replace/dlfcn.c b/lib/replace/dlfcn.c new file mode 100644 index 0000000..88431ed --- /dev/null +++ b/lib/replace/dlfcn.c @@ -0,0 +1,76 @@ +/* + Unix SMB/CIFS implementation. + Samba system utilities + Copyright (C) Andrew Tridgell 1992-1998 + Copyright (C) Jeremy Allison 1998-2002 + Copyright (C) Jelmer Vernooij 2006 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. 
+ + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include "replace.h" +#ifdef HAVE_DL_H +#include <dl.h> +#endif + +#ifndef HAVE_DLOPEN +#ifdef DLOPEN_TAKES_UNSIGNED_FLAGS +void *rep_dlopen(const char *name, unsigned int flags) +#else +void *rep_dlopen(const char *name, int flags) +#endif +{ +#ifdef HAVE_SHL_LOAD + if (name == NULL) + return PROG_HANDLE; + return (void *)shl_load(name, flags, 0); +#else + return NULL; +#endif +} +#endif + +#ifndef HAVE_DLSYM +void *rep_dlsym(void *handle, const char *symbol) +{ +#ifdef HAVE_SHL_FINDSYM + void *sym_addr; + if (!shl_findsym((shl_t *)&handle, symbol, TYPE_UNDEFINED, &sym_addr)) + return sym_addr; +#endif + return NULL; +} +#endif + +#ifndef HAVE_DLERROR +char *rep_dlerror(void) +{ + return "dynamic loading of objects not supported on this platform"; +} +#endif + +#ifndef HAVE_DLCLOSE +int rep_dlclose(void *handle) +{ +#ifdef HAVE_SHL_CLOSE + return shl_unload((shl_t)handle); +#else + return 0; +#endif +} +#endif diff --git a/lib/replace/getaddrinfo.c b/lib/replace/getaddrinfo.c new file mode 100644 index 0000000..8440d8e --- /dev/null +++ b/lib/replace/getaddrinfo.c @@ -0,0 +1,493 @@ +/* +PostgreSQL Database Management System +(formerly known as Postgres, then as Postgres95) + +Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group + +Portions Copyright (c) 1994, The Regents of the University of California + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose, without fee, and without a written agreement +is hereby granted, provided that the above copyright notice and this paragraph +and the following two paragraphs appear in all copies. + +IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR +DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING +LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, +EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS +ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS +TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + +*/ + +/*------------------------------------------------------------------------- + * + * getaddrinfo.c + * Support getaddrinfo() on platforms that don't have it. + * + * We also supply getnameinfo() here, assuming that the platform will have + * it if and only if it has getaddrinfo(). If this proves false on some + * platform, we'll need to split this file and provide a separate configure + * test for getnameinfo(). + * + * Copyright (c) 2003-2007, PostgreSQL Global Development Group + * + * Copyright (C) 2007 Jeremy Allison. + * Modified to return multiple IPv4 addresses for Samba. 
+ * + *------------------------------------------------------------------------- + */ + +#include "replace.h" +#include "system/network.h" + +#ifndef SMB_MALLOC +#define SMB_MALLOC(s) malloc(s) +#endif + +#ifndef SMB_STRDUP +#define SMB_STRDUP(s) strdup(s) +#endif + +static int check_hostent_err(struct hostent *hp) +{ + if (!hp) { + switch (h_errno) { + case HOST_NOT_FOUND: + case NO_DATA: + return EAI_NONAME; + case TRY_AGAIN: + return EAI_AGAIN; + case NO_RECOVERY: + default: + return EAI_FAIL; + } + } + if (!hp->h_name || hp->h_addrtype != AF_INET) { + return EAI_FAIL; + } + return 0; +} + +static char *canon_name_from_hostent(struct hostent *hp, + int *perr) +{ + char *ret = NULL; + + *perr = check_hostent_err(hp); + if (*perr) { + return NULL; + } + ret = SMB_STRDUP(hp->h_name); + if (!ret) { + *perr = EAI_MEMORY; + } + return ret; +} + +static char *get_my_canon_name(int *perr) +{ + char name[HOST_NAME_MAX+1]; + + if (gethostname(name, HOST_NAME_MAX) == -1) { + *perr = EAI_FAIL; + return NULL; + } + /* Ensure null termination. */ + name[HOST_NAME_MAX] = '\0'; + return canon_name_from_hostent(gethostbyname(name), perr); +} + +static char *get_canon_name_from_addr(struct in_addr ip, + int *perr) +{ + return canon_name_from_hostent( + gethostbyaddr(&ip, sizeof(ip), AF_INET), + perr); +} + +static struct addrinfo *alloc_entry(const struct addrinfo *hints, + struct in_addr ip, + unsigned short port) +{ + struct sockaddr_in *psin = NULL; + struct addrinfo *ai = SMB_MALLOC(sizeof(*ai)); + + if (!ai) { + return NULL; + } + memset(ai, '\0', sizeof(*ai)); + + psin = SMB_MALLOC(sizeof(*psin)); + if (!psin) { + free(ai); + return NULL; + } + + memset(psin, '\0', sizeof(*psin)); + + psin->sin_family = AF_INET; + psin->sin_port = htons(port); + psin->sin_addr = ip; + + ai->ai_flags = 0; + ai->ai_family = AF_INET; + ai->ai_socktype = hints->ai_socktype; + ai->ai_protocol = hints->ai_protocol; + ai->ai_addrlen = sizeof(*psin); + ai->ai_addr = (struct sockaddr *) psin; + ai->ai_canonname = NULL; + ai->ai_next = NULL; + + return ai; +} + +/* + * get address info for a single ipv4 address. + * + * Bugs: - servname can only be a number, not text. + */ + +static int getaddr_info_single_addr(const char *service, + uint32_t addr, + const struct addrinfo *hints, + struct addrinfo **res) +{ + + struct addrinfo *ai = NULL; + struct in_addr ip; + unsigned short port = 0; + + if (service) { + port = (unsigned short)atoi(service); + } + ip.s_addr = htonl(addr); + + ai = alloc_entry(hints, ip, port); + if (!ai) { + return EAI_MEMORY; + } + + /* If we're asked for the canonical name, + * make sure it returns correctly. */ + if (!(hints->ai_flags & AI_NUMERICSERV) && + hints->ai_flags & AI_CANONNAME) { + int err; + if (addr == INADDR_LOOPBACK || addr == INADDR_ANY) { + ai->ai_canonname = get_my_canon_name(&err); + } else { + ai->ai_canonname = + get_canon_name_from_addr(ip,&err); + } + if (ai->ai_canonname == NULL) { + freeaddrinfo(ai); + return err; + } + } + + *res = ai; + return 0; +} + +/* + * get address info for multiple ipv4 addresses. + * + * Bugs: - servname can only be a number, not text. 
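+ *
+ * Illustrative call (editor's sketch of the standard getaddrinfo(3)
+ * pattern; the host name and port here are made up):
+ *
+ *   struct addrinfo hints, *res = NULL, *p = NULL;
+ *   memset(&hints, 0, sizeof(hints));
+ *   hints.ai_family = AF_INET;
+ *   hints.ai_socktype = SOCK_STREAM;
+ *   if (getaddrinfo("host.example.com", "445", &hints, &res) == 0) {
+ *           for (p = res; p != NULL; p = p->ai_next) {
+ *                   ... try connect(..., p->ai_addr, p->ai_addrlen) ...
+ *           }
+ *           freeaddrinfo(res);
+ *   }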
+ */ + +static int getaddr_info_name(const char *node, + const char *service, + const struct addrinfo *hints, + struct addrinfo **res) +{ + struct addrinfo *listp = NULL, *prevp = NULL; + char **pptr = NULL; + int err; + struct hostent *hp = NULL; + unsigned short port = 0; + + if (service) { + port = (unsigned short)atoi(service); + } + + hp = gethostbyname(node); + err = check_hostent_err(hp); + if (err) { + return err; + } + + for(pptr = hp->h_addr_list; *pptr; pptr++) { + struct in_addr ip = *(struct in_addr *)*pptr; + struct addrinfo *ai = alloc_entry(hints, ip, port); + + if (!ai) { + freeaddrinfo(listp); + return EAI_MEMORY; + } + + if (!listp) { + listp = ai; + prevp = ai; + ai->ai_canonname = SMB_STRDUP(hp->h_name); + if (!ai->ai_canonname) { + freeaddrinfo(listp); + return EAI_MEMORY; + } + } else { + prevp->ai_next = ai; + prevp = ai; + } + } + *res = listp; + return 0; +} + +/* + * get address info for ipv4 sockets. + * + * Bugs: - servname can only be a number, not text. + */ + +int rep_getaddrinfo(const char *node, + const char *service, + const struct addrinfo * hintp, + struct addrinfo ** res) +{ + struct addrinfo hints; + + /* Setup the hints struct. */ + if (hintp == NULL) { + memset(&hints, 0, sizeof(hints)); + hints.ai_family = AF_INET; + hints.ai_socktype = SOCK_STREAM; + } else { + memcpy(&hints, hintp, sizeof(hints)); + } + + if (hints.ai_family != AF_INET && hints.ai_family != AF_UNSPEC) { + return EAI_FAMILY; + } + + if (hints.ai_socktype == 0) { + hints.ai_socktype = SOCK_STREAM; + } + + if (!node && !service) { + return EAI_NONAME; + } + + if (node) { + if (node[0] == '\0') { + return getaddr_info_single_addr(service, + INADDR_ANY, + &hints, + res); + } else if (hints.ai_flags & AI_NUMERICHOST) { + struct in_addr ip; + if (!inet_aton(node, &ip)) { + return EAI_FAIL; + } + return getaddr_info_single_addr(service, + ntohl(ip.s_addr), + &hints, + res); + } else { + return getaddr_info_name(node, + service, + &hints, + res); + } + } else if (hints.ai_flags & AI_PASSIVE) { + return getaddr_info_single_addr(service, + INADDR_ANY, + &hints, + res); + } + return getaddr_info_single_addr(service, + INADDR_LOOPBACK, + &hints, + res); +} + + +void rep_freeaddrinfo(struct addrinfo *res) +{ + struct addrinfo *next = NULL; + + for (;res; res = next) { + next = res->ai_next; + free(res->ai_canonname); + free(res->ai_addr); + free(res); + } +} + + +const char *rep_gai_strerror(int errcode) +{ +#ifdef HAVE_HSTRERROR + int hcode; + + switch (errcode) + { + case EAI_NONAME: + hcode = HOST_NOT_FOUND; + break; + case EAI_AGAIN: + hcode = TRY_AGAIN; + break; + case EAI_FAIL: + default: + hcode = NO_RECOVERY; + break; + } + + return hstrerror(hcode); +#else /* !HAVE_HSTRERROR */ + + switch (errcode) + { + case EAI_NONAME: + return "Unknown host"; + case EAI_AGAIN: + return "Host name lookup failure"; +#ifdef EAI_BADFLAGS + case EAI_BADFLAGS: + return "Invalid argument"; +#endif +#ifdef EAI_FAMILY + case EAI_FAMILY: + return "Address family not supported"; +#endif +#ifdef EAI_MEMORY + case EAI_MEMORY: + return "Not enough memory"; +#endif +#ifdef EAI_NODATA + case EAI_NODATA: + return "No host data of that type was found"; +#endif +#ifdef EAI_SERVICE + case EAI_SERVICE: + return "Class type not found"; +#endif +#ifdef EAI_SOCKTYPE + case EAI_SOCKTYPE: + return "Socket type not supported"; +#endif + default: + return "Unknown server error"; + } +#endif /* HAVE_HSTRERROR */ +} + +static int gethostnameinfo(const struct sockaddr *sa, + char *node, + size_t nodelen, + int flags) +{ + int 
ret = -1; + char *p = NULL; + + if (!(flags & NI_NUMERICHOST)) { + struct hostent *hp = gethostbyaddr( + &((struct sockaddr_in *)sa)->sin_addr, + sizeof(struct in_addr), + sa->sa_family); + ret = check_hostent_err(hp); + if (ret == 0) { + /* Name looked up successfully. */ + ret = snprintf(node, nodelen, "%s", hp->h_name); + if (ret < 0 || (size_t)ret >= nodelen) { + return EAI_MEMORY; + } + if (flags & NI_NOFQDN) { + p = strchr(node,'.'); + if (p) { + *p = '\0'; + } + } + return 0; + } + + if (flags & NI_NAMEREQD) { + /* If we require a name and didn't get one, + * automatically fail. */ + return ret; + } + /* Otherwise just fall into the numeric host code... */ + } + p = inet_ntoa(((struct sockaddr_in *)sa)->sin_addr); + ret = snprintf(node, nodelen, "%s", p); + if (ret < 0 || (size_t)ret >= nodelen) { + return EAI_MEMORY; + } + return 0; +} + +static int getservicenameinfo(const struct sockaddr *sa, + char *service, + size_t servicelen, + int flags) +{ + int ret = -1; + int port = ntohs(((struct sockaddr_in *)sa)->sin_port); + + if (!(flags & NI_NUMERICSERV)) { + struct servent *se = getservbyport( + port, + (flags & NI_DGRAM) ? "udp" : "tcp"); + if (se && se->s_name) { + /* Service name looked up successfully. */ + ret = snprintf(service, servicelen, "%s", se->s_name); + if (ret < 0 || (size_t)ret >= servicelen) { + return EAI_MEMORY; + } + return 0; + } + /* Otherwise just fall into the numeric service code... */ + } + ret = snprintf(service, servicelen, "%d", port); + if (ret < 0 || (size_t)ret >= servicelen) { + return EAI_MEMORY; + } + return 0; +} + +/* + * Convert an ipv4 address to a hostname. + * + * Bugs: - No IPv6 support. + */ +int rep_getnameinfo(const struct sockaddr *sa, socklen_t salen, + char *node, size_t nodelen, + char *service, size_t servicelen, int flags) +{ + + /* Invalid arguments. */ + if (sa == NULL || (node == NULL && service == NULL)) { + return EAI_FAIL; + } + + if (sa->sa_family != AF_INET) { + return EAI_FAIL; + } + + if (salen < sizeof(struct sockaddr_in)) { + return EAI_FAIL; + } + + if (node) { + return gethostnameinfo(sa, node, nodelen, flags); + } + + if (service) { + return getservicenameinfo(sa, service, servicelen, flags); + } + return 0; +} diff --git a/lib/replace/getaddrinfo.h b/lib/replace/getaddrinfo.h new file mode 100644 index 0000000..cf040da --- /dev/null +++ b/lib/replace/getaddrinfo.h @@ -0,0 +1,91 @@ +/* +PostgreSQL Database Management System +(formerly known as Postgres, then as Postgres95) + +Portions Copyright (c) 1996-2005, The PostgreSQL Global Development Group + +Portions Copyright (c) 1994, The Regents of the University of California + +Permission to use, copy, modify, and distribute this software and its +documentation for any purpose, without fee, and without a written agreement +is hereby granted, provided that the above copyright notice and this paragraph +and the following two paragraphs appear in all copies. + +IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR +DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING +LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, +EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGE. + +THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, +INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY +AND FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS +ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS +TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. + +*/ + +/*------------------------------------------------------------------------- + * + * getaddrinfo.h + * Support getaddrinfo() on platforms that don't have it. + * + * Note: we use our own routines on platforms that don't HAVE_STRUCT_ADDRINFO, + * whether or not the library routine getaddrinfo() can be found. This + * policy is needed because on some platforms a manually installed libbind.a + * may provide getaddrinfo(), yet the system headers may not provide the + * struct definitions needed to call it. To avoid conflict with the libbind + * definition in such cases, we rename our routines to pg_xxx() via macros. + * + +in lib/replace we use rep_xxx() + + * This code will also work on platforms where struct addrinfo is defined + * in the system headers but no getaddrinfo() can be located. + * + * Copyright (c) 2003-2007, PostgreSQL Global Development Group + * + *------------------------------------------------------------------------- + */ +#ifndef GETADDRINFO_H +#define GETADDRINFO_H + +#ifndef HAVE_GETADDRINFO + +/* Rename private copies per comments above */ +#ifdef getaddrinfo +#undef getaddrinfo +#endif +#define getaddrinfo rep_getaddrinfo +#define HAVE_GETADDRINFO + +#ifdef freeaddrinfo +#undef freeaddrinfo +#endif +#define freeaddrinfo rep_freeaddrinfo +#define HAVE_FREEADDRINFO + +#ifdef gai_strerror +#undef gai_strerror +#endif +#define gai_strerror rep_gai_strerror +#define HAVE_GAI_STRERROR + +#ifdef getnameinfo +#undef getnameinfo +#endif +#define getnameinfo rep_getnameinfo +#ifndef HAVE_GETNAMEINFO +#define HAVE_GETNAMEINFO +#endif + +extern int rep_getaddrinfo(const char *node, const char *service, + const struct addrinfo * hints, struct addrinfo ** res); +extern void rep_freeaddrinfo(struct addrinfo * res); +extern const char *rep_gai_strerror(int errcode); +extern int rep_getnameinfo(const struct sockaddr * sa, socklen_t salen, + char *node, size_t nodelen, + char *service, size_t servicelen, int flags); +#endif /* HAVE_GETADDRINFO */ + +#endif /* GETADDRINFO_H */ diff --git a/lib/replace/getifaddrs.c b/lib/replace/getifaddrs.c new file mode 100644 index 0000000..a55ef7e --- /dev/null +++ b/lib/replace/getifaddrs.c @@ -0,0 +1,383 @@ +/* + Unix SMB/CIFS implementation. + Samba utility functions + Copyright (C) Andrew Tridgell 1998 + Copyright (C) Jeremy Allison 2007 + Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2007 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include "replace.h" +#include "system/network.h" + +#include <unistd.h> +#include <stdio.h> +#include <sys/types.h> + +#ifdef HAVE_SYS_TIME_H +#include <sys/time.h> +#endif + +#ifndef SIOCGIFCONF +#ifdef HAVE_SYS_SOCKIO_H +#include <sys/sockio.h> +#endif +#endif + +#ifdef HAVE_IFACE_GETIFADDRS +#define _FOUND_IFACE_ANY +#else + +void rep_freeifaddrs(struct ifaddrs *ifp) +{ + if (ifp != NULL) { + free(ifp->ifa_name); + free(ifp->ifa_addr); + free(ifp->ifa_netmask); + free(ifp->ifa_dstaddr); + freeifaddrs(ifp->ifa_next); + free(ifp); + } +} + +static struct sockaddr *sockaddr_dup(struct sockaddr *sa) +{ + struct sockaddr *ret; + socklen_t socklen; +#ifdef HAVE_SOCKADDR_SA_LEN + socklen = sa->sa_len; +#else + socklen = sizeof(struct sockaddr_storage); +#endif + ret = calloc(1, socklen); + if (ret == NULL) + return NULL; + memcpy(ret, sa, socklen); + return ret; +} +#endif + +#ifdef HAVE_IFACE_IFCONF + +/* this works for Linux 2.2, Solaris 2.5, SunOS4, HPUX 10.20, OSF1 + V4.0, Ultrix 4.4, SCO Unix 3.2, IRIX 6.4 and FreeBSD 3.2. + + It probably also works on any BSD style system. */ + +int rep_getifaddrs(struct ifaddrs **ifap) +{ + struct ifconf ifc; + char buff[8192]; + int fd, i, n; + struct ifreq *ifr=NULL; + struct ifaddrs *curif; + struct ifaddrs *lastif = NULL; + + *ifap = NULL; + + if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) { + return -1; + } + + ifc.ifc_len = sizeof(buff); + ifc.ifc_buf = buff; + + if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) { + close(fd); + return -1; + } + + ifr = ifc.ifc_req; + + n = ifc.ifc_len / sizeof(struct ifreq); + + /* Loop through interfaces, looking for given IP address */ + for (i=n-1; i>=0; i--) { + if (ioctl(fd, SIOCGIFFLAGS, &ifr[i]) == -1) { + freeifaddrs(*ifap); + close(fd); + return -1; + } + + curif = calloc(1, sizeof(struct ifaddrs)); + if (curif == NULL) { + freeifaddrs(*ifap); + close(fd); + return -1; + } + curif->ifa_name = strdup(ifr[i].ifr_name); + if (curif->ifa_name == NULL) { + free(curif); + freeifaddrs(*ifap); + close(fd); + return -1; + } + curif->ifa_flags = ifr[i].ifr_flags; + curif->ifa_dstaddr = NULL; + curif->ifa_data = NULL; + curif->ifa_next = NULL; + + curif->ifa_addr = NULL; + if (ioctl(fd, SIOCGIFADDR, &ifr[i]) != -1) { + curif->ifa_addr = sockaddr_dup(&ifr[i].ifr_addr); + if (curif->ifa_addr == NULL) { + free(curif->ifa_name); + free(curif); + freeifaddrs(*ifap); + close(fd); + return -1; + } + } + + curif->ifa_netmask = NULL; + if (ioctl(fd, SIOCGIFNETMASK, &ifr[i]) != -1) { + curif->ifa_netmask = sockaddr_dup(&ifr[i].ifr_addr); + if (curif->ifa_netmask == NULL) { + if (curif->ifa_addr != NULL) { + free(curif->ifa_addr); + } + free(curif->ifa_name); + free(curif); + freeifaddrs(*ifap); + close(fd); + return -1; + } + } + + if (lastif == NULL) { + *ifap = curif; + } else { + lastif->ifa_next = curif; + } + lastif = curif; + } + + close(fd); + + return 0; +} + +#define _FOUND_IFACE_ANY +#endif /* HAVE_IFACE_IFCONF */ +#ifdef HAVE_IFACE_IFREQ + +#ifndef I_STR +#include <sys/stropts.h> +#endif + +/**************************************************************************** +this should cover most of the streams based systems +Thanks to Andrej.Borsenkow@mow.siemens.ru for several ideas in this code +****************************************************************************/ +int rep_getifaddrs(struct ifaddrs **ifap) +{ + struct ifreq ifreq; + struct strioctl strioctl; + char buff[8192]; + int fd, i, n; + struct ifreq *ifr=NULL; + struct ifaddrs *curif; + struct ifaddrs *lastif = NULL; + + *ifap = NULL; + + if ((fd = 
socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
+		return -1;
+	}
+
+	strioctl.ic_cmd = SIOCGIFCONF;
+	strioctl.ic_dp = buff;
+	strioctl.ic_len = sizeof(buff);
+	if (ioctl(fd, I_STR, &strioctl) < 0) {
+		close(fd);
+		return -1;
+	}
+
+	/* we can ignore the possible sizeof(int) here as the resulting
+	   number of interface structures won't change */
+	n = strioctl.ic_len / sizeof(struct ifreq);
+
+	/* we will assume that the kernel returns the length as an int
+	   at the start of the buffer if the offered size is a
+	   multiple of the structure size plus an int */
+	if (n*sizeof(struct ifreq) + sizeof(int) == strioctl.ic_len) {
+		ifr = (struct ifreq *)(buff + sizeof(int));
+	} else {
+		ifr = (struct ifreq *)buff;
+	}
+
+	/* Loop through interfaces */
+
+	for (i = 0; i<n; i++) {
+		ifreq = ifr[i];
+
+		curif = calloc(1, sizeof(struct ifaddrs));
+		if (curif == NULL) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+		if (lastif == NULL) {
+			*ifap = curif;
+		} else {
+			lastif->ifa_next = curif;
+		}
+
+		strioctl.ic_cmd = SIOCGIFFLAGS;
+		strioctl.ic_dp = (char *)&ifreq;
+		strioctl.ic_len = sizeof(struct ifreq);
+		if (ioctl(fd, I_STR, &strioctl) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+
+		curif->ifa_flags = ifreq.ifr_flags;
+
+		strioctl.ic_cmd = SIOCGIFADDR;
+		strioctl.ic_dp = (char *)&ifreq;
+		strioctl.ic_len = sizeof(struct ifreq);
+		if (ioctl(fd, I_STR, &strioctl) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+
+		curif->ifa_name = strdup(ifreq.ifr_name);
+		curif->ifa_addr = sockaddr_dup(&ifreq.ifr_addr);
+		curif->ifa_dstaddr = NULL;
+		curif->ifa_data = NULL;
+		curif->ifa_next = NULL;
+		curif->ifa_netmask = NULL;
+
+		strioctl.ic_cmd = SIOCGIFNETMASK;
+		strioctl.ic_dp = (char *)&ifreq;
+		strioctl.ic_len = sizeof(struct ifreq);
+		if (ioctl(fd, I_STR, &strioctl) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+
+		curif->ifa_netmask = sockaddr_dup(&ifreq.ifr_addr);
+
+		lastif = curif;
+	}
+
+	close(fd);
+
+	return 0;
+}
+
+#define _FOUND_IFACE_ANY
+#endif /* HAVE_IFACE_IFREQ */
+#ifdef HAVE_IFACE_AIX
+
+/****************************************************************************
+this one is for AIX (tested on 4.2)
+****************************************************************************/
+int rep_getifaddrs(struct ifaddrs **ifap)
+{
+	char buff[8192];
+	int fd, i;
+	struct ifconf ifc;
+	struct ifreq *ifr=NULL;
+	struct ifaddrs *curif;
+	struct ifaddrs *lastif = NULL;
+
+	*ifap = NULL;
+
+	if ((fd = socket(AF_INET, SOCK_DGRAM, 0)) == -1) {
+		return -1;
+	}
+
+	ifc.ifc_len = sizeof(buff);
+	ifc.ifc_buf = buff;
+
+	if (ioctl(fd, SIOCGIFCONF, &ifc) != 0) {
+		close(fd);
+		return -1;
+	}
+
+	ifr = ifc.ifc_req;
+
+	/* Loop through interfaces */
+	i = ifc.ifc_len;
+
+	while (i > 0) {
+		unsigned int inc;
+
+		inc = ifr->ifr_addr.sa_len;
+
+		if (ioctl(fd, SIOCGIFADDR, ifr) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+
+		curif = calloc(1, sizeof(struct ifaddrs));
+		if (curif == NULL) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+		if (lastif == NULL) {
+			*ifap = curif;
+		} else {
+			lastif->ifa_next = curif;
+		}
+
+		curif->ifa_name = strdup(ifr->ifr_name);
+		curif->ifa_addr = sockaddr_dup(&ifr->ifr_addr);
+		curif->ifa_dstaddr = NULL;
+		curif->ifa_data = NULL;
+		curif->ifa_netmask = NULL;
+		curif->ifa_next = NULL;
+
+		if (ioctl(fd, SIOCGIFFLAGS, ifr) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+
+		curif->ifa_flags = ifr->ifr_flags;
+
+		if (ioctl(fd, SIOCGIFNETMASK, ifr) != 0) {
+			freeifaddrs(*ifap);
+			close(fd);
+			return -1;
+		}
+
+		curif->ifa_netmask = sockaddr_dup(&ifr->ifr_addr);
+
+		lastif = curif;
+
+		/*
+		 * Patch from Archie Cobbs (archie@whistle.com). The
+		 * addresses in the SIOCGIFCONF interface list have a
+		 * minimum size.
Usually this doesn't matter, but if + * your machine has tunnel interfaces, etc. that have + * a zero length "link address", this does matter. */ + + if (inc < sizeof(ifr->ifr_addr)) + inc = sizeof(ifr->ifr_addr); + inc += IFNAMSIZ; + + ifr = (struct ifreq*) (((char*) ifr) + inc); + i -= inc; + } + + close(fd); + return 0; +} + +#define _FOUND_IFACE_ANY +#endif /* HAVE_IFACE_AIX */ +#ifndef _FOUND_IFACE_ANY +int rep_getifaddrs(struct ifaddrs **ifap) +{ + errno = ENOSYS; + return -1; +} +#endif diff --git a/lib/replace/hdr_replace.h b/lib/replace/hdr_replace.h new file mode 100644 index 0000000..6cfa50f --- /dev/null +++ b/lib/replace/hdr_replace.h @@ -0,0 +1,2 @@ +/* this is a replacement header for a missing system header */ +#include "replace.h" diff --git a/lib/replace/inet_aton.c b/lib/replace/inet_aton.c new file mode 100644 index 0000000..c6b3bb1 --- /dev/null +++ b/lib/replace/inet_aton.c @@ -0,0 +1,33 @@ +/* + * Unix SMB/CIFS implementation. + * replacement functions + * Copyright (C) Michael Adam <obnox@samba.org> 2008 + * + * ** NOTE! The following LGPL license applies to the replace + * ** library. This does NOT imply that all of Samba is released + * ** under the LGPL + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "replace.h" +#include "system/network.h" + +/** + * We know that we have inet_pton from earlier libreplace checks. + */ +int rep_inet_aton(const char *src, struct in_addr *dst) +{ + return (inet_pton(AF_INET, src, dst) > 0) ? 1 : 0; +} diff --git a/lib/replace/inet_ntoa.c b/lib/replace/inet_ntoa.c new file mode 100644 index 0000000..e3b80eb --- /dev/null +++ b/lib/replace/inet_ntoa.c @@ -0,0 +1,39 @@ +/* + * Unix SMB/CIFS implementation. + * replacement routines for broken systems + * Copyright (C) Andrew Tridgell 2003 + * Copyright (C) Michael Adam 2008 + * + * ** NOTE! The following LGPL license applies to the replace + * ** library. This does NOT imply that all of Samba is released + * ** under the LGPL + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "replace.h" +#include "system/network.h" + +/** + * NOTE: this is not thread safe, but it can't be, either + * since it returns a pointer to static memory. 
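+ *
+ * One practical consequence (editor's example): both calls below write to
+ * the same static buffer, so the second conversion clobbers the first
+ * before printf() reads either argument:
+ *
+ *   printf("%s %s\n", inet_ntoa(ip1), inet_ntoa(ip2));
+ *
+ * Callers needing both strings must copy the first result away before
+ * making the second call.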
+ */ +char *rep_inet_ntoa(struct in_addr ip) +{ + uint8_t *p = (uint8_t *)&ip.s_addr; + static char buf[18]; + slprintf(buf, 17, "%d.%d.%d.%d", + (int)p[0], (int)p[1], (int)p[2], (int)p[3]); + return buf; +} diff --git a/lib/replace/inet_ntop.c b/lib/replace/inet_ntop.c new file mode 100644 index 0000000..d6d4158 --- /dev/null +++ b/lib/replace/inet_ntop.c @@ -0,0 +1,193 @@ +/* + * Copyright (C) 1996-2001 Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM + * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, + * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + +#include "replace.h" +#include "system/network.h" + +#define NS_INT16SZ 2 +#define NS_IN6ADDRSZ 16 + +/* + * WARNING: Don't even consider trying to compile this on a system where + * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. + */ + +static const char *inet_ntop4(const unsigned char *src, char *dst, + socklen_t size); + +#ifdef AF_INET6 +static const char *inet_ntop6(const unsigned char *src, char *dst, + socklen_t size); +#endif + +/* char * + * isc_net_ntop(af, src, dst, size) + * convert a network format address to presentation format. + * return: + * pointer to presentation format address (`dst'), or NULL (see errno). + * author: + * Paul Vixie, 1996. + */ +const char * +rep_inet_ntop(int af, const void *src, char *dst, socklen_t size) +{ + switch (af) { + case AF_INET: + return (inet_ntop4(src, dst, size)); +#ifdef AF_INET6 + case AF_INET6: + return (inet_ntop6(src, dst, size)); +#endif + default: + errno = EAFNOSUPPORT; + return (NULL); + } + /* NOTREACHED */ +} + +/* const char * + * inet_ntop4(src, dst, size) + * format an IPv4 address + * return: + * `dst' (as a const) + * author: + * Paul Vixie, 1996. + */ +static const char * +inet_ntop4(const unsigned char *src, char *dst, socklen_t size) +{ + char tmp[sizeof("255.255.255.255")]; + size_t len; + + len = snprintf(tmp, + sizeof(tmp), + "%hhu.%hhu.%hhu.%hhu", + src[0], + src[1], + src[2], + src[3]); + if (len >= size) { + errno = ENOSPC; + return (NULL); + } + memcpy(dst, tmp, len + 1); + + return (dst); +} + +/* const char * + * isc_inet_ntop6(src, dst, size) + * convert IPv6 binary address into presentation (printable) format + * author: + * Paul Vixie, 1996. + */ +#ifdef AF_INET6 +static const char * +inet_ntop6(const unsigned char *src, char *dst, socklen_t size) +{ + /* + * Note that int32_t and int16_t need only be "at least" large enough + * to contain a value of the specified size. On some systems, like + * Crays, there is no such thing as an integer variable with 16 bits. + * Keep this in mind if you think this function should have been coded + * to use pointer overlays. All the world's not a VAX. 
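+	 *
+	 * Worked example (editor's illustration): the address whose eight
+	 * 16-bit words are 2001 0db8 0000 0000 0000 0000 0000 0001 has a
+	 * longest zero run of five words, so the code below emits
+	 * "2001:db8::1" rather than the uncompressed
+	 * "2001:0db8:0000:0000:0000:0000:0000:0001".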
+ */ + char tmp[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255"], *tp; + struct { int base, len; } best, cur; + unsigned int words[NS_IN6ADDRSZ / NS_INT16SZ]; + int i, inc; + + /* + * Preprocess: + * Copy the input (bytewise) array into a wordwise array. + * Find the longest run of 0x00's in src[] for :: shorthanding. + */ + memset(words, '\0', sizeof words); + for (i = 0; i < NS_IN6ADDRSZ; i++) + words[i / 2] |= (src[i] << ((1 - (i % 2)) << 3)); + best.base = -1; + best.len = 0; + cur.base = -1; + cur.len = 0; + for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { + if (words[i] == 0) { + if (cur.base == -1) + cur.base = i, cur.len = 1; + else + cur.len++; + } else { + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + cur.base = -1; + } + } + } + if (cur.base != -1) { + if (best.base == -1 || cur.len > best.len) + best = cur; + } + if (best.base != -1 && best.len < 2) + best.base = -1; + + /* + * Format the result. + */ + tp = tmp; + for (i = 0; i < (NS_IN6ADDRSZ / NS_INT16SZ); i++) { + /* Are we inside the best run of 0x00's? */ + if (best.base != -1 && i >= best.base && + i < (best.base + best.len)) { + if (i == best.base) + *tp++ = ':'; + continue; + } + /* Are we following an initial run of 0x00s or any real hex? */ + if (i != 0) + *tp++ = ':'; + /* Is this address an encapsulated IPv4? */ + if (i == 6 && best.base == 0 && + (best.len == 6 || (best.len == 5 && words[5] == 0xffff))) { + if (!inet_ntop4(src+12, tp, sizeof tmp - (tp - tmp))) + return (NULL); + tp += strlen(tp); + break; + } + inc = snprintf(tp, 5, "%x", words[i]); + if (inc >= 5) { + abort(); + } + tp += inc; + } + /* Was it a trailing run of 0x00's? */ + if (best.base != -1 && (best.base + best.len) == + (NS_IN6ADDRSZ / NS_INT16SZ)) + *tp++ = ':'; + *tp++ = '\0'; + + /* + * Check for overflow, copy, and we're done. + */ + if ((size_t)(tp - tmp) > size) { + errno = ENOSPC; + return (NULL); + } + memcpy(dst, tmp, tp - tmp); + return (dst); +} +#endif /* AF_INET6 */ diff --git a/lib/replace/inet_pton.c b/lib/replace/inet_pton.c new file mode 100644 index 0000000..80e4865 --- /dev/null +++ b/lib/replace/inet_pton.c @@ -0,0 +1,213 @@ +/* + * Copyright (C) 1996-2001 Internet Software Consortium. + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SOFTWARE CONSORTIUM + * DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL + * INTERNET SOFTWARE CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT, + * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING + * FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, + * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION + * WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include "replace.h" +#include "system/network.h" + +#define NS_INT16SZ 2 +#define NS_INADDRSZ 4 +#define NS_IN6ADDRSZ 16 + +/* + * WARNING: Don't even consider trying to compile this on a system where + * sizeof(int) < 4. sizeof(int) > 4 is fine; all the world's not a VAX. 
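+ *
+ * Typical round trip (editor's sketch; the dotted quad is an example
+ * address from the 192.0.2.0/24 documentation range):
+ *
+ *   struct in_addr a;
+ *   char buf[16];
+ *   if (inet_pton(AF_INET, "192.0.2.1", &a) == 1 &&
+ *       inet_ntop(AF_INET, &a, buf, sizeof(buf)) != NULL) {
+ *           ... buf now holds "192.0.2.1" ...
+ *   }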
+ */ + +static int inet_pton4(const char *src, unsigned char *dst); +#ifdef AF_INET6 +static int inet_pton6(const char *src, unsigned char *dst); +#endif + +/* int + * inet_pton(af, src, dst) + * convert from presentation format (which usually means ASCII printable) + * to network format (which is usually some kind of binary format). + * return: + * 1 if the address was valid for the specified address family + * 0 if the address wasn't valid (`dst' is untouched in this case) + * -1 if some other error occurred (`dst' is untouched in this case, too) + * author: + * Paul Vixie, 1996. + */ +int +rep_inet_pton(int af, + const char *src, + void *dst) +{ + switch (af) { + case AF_INET: + return (inet_pton4(src, dst)); +#ifdef AF_INET6 + case AF_INET6: + return (inet_pton6(src, dst)); +#endif + default: + errno = EAFNOSUPPORT; + return (-1); + } + /* NOTREACHED */ +} + +/* int + * inet_pton4(src, dst) + * like inet_aton() but without all the hexadecimal and shorthand. + * return: + * 1 if `src' is a valid dotted quad, else 0. + * notice: + * does not touch `dst' unless it's returning 1. + * author: + * Paul Vixie, 1996. + */ +static int +inet_pton4(src, dst) + const char *src; + unsigned char *dst; +{ + static const char digits[] = "0123456789"; + int saw_digit, octets, ch; + unsigned char tmp[NS_INADDRSZ], *tp; + + saw_digit = 0; + octets = 0; + *(tp = tmp) = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr(digits, ch)) != NULL) { + unsigned int new = *tp * 10 + (pch - digits); + + if (new > 255) + return (0); + *tp = new; + if (! saw_digit) { + if (++octets > 4) + return (0); + saw_digit = 1; + } + } else if (ch == '.' && saw_digit) { + if (octets == 4) + return (0); + *++tp = 0; + saw_digit = 0; + } else + return (0); + } + if (octets < 4) + return (0); + memcpy(dst, tmp, NS_INADDRSZ); + return (1); +} + +/* int + * inet_pton6(src, dst) + * convert presentation level address to network order binary form. + * return: + * 1 if `src' is a valid [RFC1884 2.2] address, else 0. + * notice: + * (1) does not touch `dst' unless it's returning 1. + * (2) :: in a full address is silently ignored. + * credit: + * inspired by Mark Andrews. + * author: + * Paul Vixie, 1996. + */ +#ifdef AF_INET6 +static int +inet_pton6(src, dst) + const char *src; + unsigned char *dst; +{ + static const char xdigits_l[] = "0123456789abcdef", + xdigits_u[] = "0123456789ABCDEF"; + unsigned char tmp[NS_IN6ADDRSZ], *tp, *endp, *colonp; + const char *xdigits, *curtok; + int ch, saw_xdigit; + unsigned int val; + + memset((tp = tmp), '\0', NS_IN6ADDRSZ); + endp = tp + NS_IN6ADDRSZ; + colonp = NULL; + /* Leading :: requires some special handling. */ + if (*src == ':') + if (*++src != ':') + return (0); + curtok = src; + saw_xdigit = 0; + val = 0; + while ((ch = *src++) != '\0') { + const char *pch; + + if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) + pch = strchr((xdigits = xdigits_u), ch); + if (pch != NULL) { + val <<= 4; + val |= (pch - xdigits); + if (val > 0xffff) + return (0); + saw_xdigit = 1; + continue; + } + if (ch == ':') { + curtok = src; + if (!saw_xdigit) { + if (colonp) + return (0); + colonp = tp; + continue; + } + if (tp + NS_INT16SZ > endp) + return (0); + *tp++ = (unsigned char) (val >> 8) & 0xff; + *tp++ = (unsigned char) val & 0xff; + saw_xdigit = 0; + val = 0; + continue; + } + if (ch == '.' && ((tp + NS_INADDRSZ) <= endp) && + inet_pton4(curtok, tp) > 0) { + tp += NS_INADDRSZ; + saw_xdigit = 0; + break; /* '\0' was seen by inet_pton4(). 
*/ + } + return (0); + } + if (saw_xdigit) { + if (tp + NS_INT16SZ > endp) + return (0); + *tp++ = (unsigned char) (val >> 8) & 0xff; + *tp++ = (unsigned char) val & 0xff; + } + if (colonp != NULL) { + /* + * Since some memmove()'s erroneously fail to handle + * overlapping regions, we'll do the shift by hand. + */ + const int n = tp - colonp; + int i; + + for (i = 1; i <= n; i++) { + endp[- i] = colonp[n - i]; + colonp[n - i] = 0; + } + tp = endp; + } + if (tp != endp) + return (0); + memcpy(dst, tmp, NS_IN6ADDRSZ); + return (1); +} +#endif diff --git a/lib/replace/poll.c b/lib/replace/poll.c new file mode 100644 index 0000000..1105617 --- /dev/null +++ b/lib/replace/poll.c @@ -0,0 +1,139 @@ +/* + Unix SMB/CIFS implementation. + poll.c - poll wrapper + + This file is based on code from libssh (LGPLv2.1+ at the time it + was downloaded), thus the following copyrights: + + Copyright (c) 2009-2010 by Andreas Schneider <mail@cynapses.org> + Copyright (c) 2003-2009 by Aris Adamantiadis + Copyright (c) 2009 Aleksandar Kanchev + Copyright (C) Volker Lendecke 2011 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include "replace.h" +#include "system/select.h" +#ifdef HAVE_SYS_TIME_H +#include <sys/time.h> +#endif +#ifdef HAVE_SYS_IOCTL_H +#include <sys/ioctl.h> +#endif + + +int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout) +{ + fd_set rfds, wfds, efds; + struct timeval tv, *ptv; + int max_fd; + int rc; + nfds_t i; + + if ((fds == NULL) && (nfds != 0)) { + errno = EFAULT; + return -1; + } + + FD_ZERO(&rfds); + FD_ZERO(&wfds); + FD_ZERO(&efds); + + rc = 0; + max_fd = 0; + + /* compute fd_sets and find largest descriptor */ + for (i = 0; i < nfds; i++) { + if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { + fds[i].revents = POLLNVAL; + continue; + } + + if (fds[i].events & (POLLIN | POLLRDNORM)) { + FD_SET(fds[i].fd, &rfds); + } + if (fds[i].events & (POLLOUT | POLLWRNORM | POLLWRBAND)) { + FD_SET(fds[i].fd, &wfds); + } + if (fds[i].events & (POLLPRI | POLLRDBAND)) { + FD_SET(fds[i].fd, &efds); + } + if (fds[i].fd > max_fd && + (fds[i].events & (POLLIN | POLLOUT | POLLPRI | + POLLRDNORM | POLLRDBAND | + POLLWRNORM | POLLWRBAND))) { + max_fd = fds[i].fd; + } + } + + if (timeout < 0) { + ptv = NULL; + } else { + ptv = &tv; + if (timeout == 0) { + tv.tv_sec = 0; + tv.tv_usec = 0; + } else { + tv.tv_sec = timeout / 1000; + tv.tv_usec = (timeout % 1000) * 1000; + } + } + + rc = select(max_fd + 1, &rfds, &wfds, &efds, ptv); + if (rc < 0) { + return -1; + } + + for (rc = 0, i = 0; i < nfds; i++) { + if ((fds[i].fd < 0) || (fds[i].fd >= FD_SETSIZE)) { + continue; + } + + fds[i].revents = 0; + + if (FD_ISSET(fds[i].fd, &rfds)) { + int err = errno; + int available = 0; + int ret; + + /* support for POLLHUP */ + ret = ioctl(fds[i].fd, FIONREAD, &available); + if ((ret == -1) || (available == 0)) { + fds[i].revents |= POLLHUP; + } else { + fds[i].revents |= fds[i].events + & (POLLIN | POLLRDNORM); + } + + errno = err; + } + if (FD_ISSET(fds[i].fd, &wfds)) { + fds[i].revents |= fds[i].events + & (POLLOUT | POLLWRNORM | POLLWRBAND); + } + if (FD_ISSET(fds[i].fd, &efds)) { + fds[i].revents |= fds[i].events + & (POLLPRI | POLLRDBAND); + } + if (fds[i].revents & ~POLLHUP) { + rc++; + } + } + return rc; +} diff --git a/lib/replace/replace-test.h b/lib/replace/replace-test.h new file mode 100644 index 0000000..ed8e75e --- /dev/null +++ b/lib/replace/replace-test.h @@ -0,0 +1,9 @@ +#ifndef __LIB_REPLACE_REPLACE_TEST_H__ +#define __LIB_REPLACE_REPLACE_TEST_H__ + +int libreplace_test_strptime(void); +int test_readdir_os2_delete(void); +int getifaddrs_test(void); + +#endif /* __LIB_REPLACE_REPLACE_TEST_H__ */ + diff --git a/lib/replace/replace-testsuite.h b/lib/replace/replace-testsuite.h new file mode 100644 index 0000000..b28dbec --- /dev/null +++ b/lib/replace/replace-testsuite.h @@ -0,0 +1,10 @@ +#ifndef __LIB_REPLACE_REPLACE_TESTSUITE_H__ +#define __LIB_REPLACE_REPLACE_TESTSUITE_H__ + +#include <stdbool.h> +struct torture_context; + +bool torture_local_replace(struct torture_context *ctx); + +#endif /* __LIB_REPLACE_REPLACE_TESTSUITE_H__ */ + diff --git a/lib/replace/replace.c b/lib/replace/replace.c new file mode 100644 index 0000000..68829f2 --- /dev/null +++ b/lib/replace/replace.c @@ -0,0 +1,1145 @@ +/* + Unix SMB/CIFS implementation. + replacement routines for broken systems + Copyright (C) Andrew Tridgell 1992-1998 + Copyright (C) Jelmer Vernooij 2005-2008 + Copyright (C) Matthieu Patou 2010 + + ** NOTE! The following LGPL license applies to the replace + ** library. 
This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include "replace.h" + +#include "system/filesys.h" +#include "system/time.h" +#include "system/network.h" +#include "system/passwd.h" +#include "system/syslog.h" +#include "system/locale.h" +#include "system/wait.h" + +#ifdef HAVE_SYS_SYSCALL_H +#include <sys/syscall.h> +#endif + +#ifdef _WIN32 +#define mkdir(d,m) _mkdir(d) +#endif + +void replace_dummy(void); +void replace_dummy(void) {} + +#ifndef HAVE_FTRUNCATE + /******************************************************************* +ftruncate for operating systems that don't have it +********************************************************************/ +int rep_ftruncate(int f, off_t l) +{ +#ifdef HAVE_CHSIZE + return chsize(f,l); +#elif defined(F_FREESP) + struct flock fl; + + fl.l_whence = 0; + fl.l_len = 0; + fl.l_start = l; + fl.l_type = F_WRLCK; + return fcntl(f, F_FREESP, &fl); +#else +#error "you must have a ftruncate function" +#endif +} +#endif /* HAVE_FTRUNCATE */ + + +#ifndef HAVE_STRLCPY +/* + * Like strncpy but does not 0 fill the buffer and always null + * terminates. bufsize is the size of the destination buffer. + * Returns the length of s. + */ +size_t rep_strlcpy(char *d, const char *s, size_t bufsize) +{ + size_t len = strlen(s); + size_t ret = len; + + if (bufsize <= 0) { + return 0; + } + if (len >= bufsize) { + len = bufsize - 1; + } + memcpy(d, s, len); + d[len] = 0; + return ret; +} +#endif + +#ifndef HAVE_STRLCAT +/* like strncat but does not 0 fill the buffer and always null + terminates. bufsize is the length of the buffer, which should + be one more than the maximum resulting string length */ +size_t rep_strlcat(char *d, const char *s, size_t bufsize) +{ + size_t len1 = strnlen(d, bufsize); + size_t len2 = strlen(s); + size_t ret = len1 + len2; + + if (len1+len2 >= bufsize) { + if (bufsize < (len1+1)) { + return ret; + } + len2 = bufsize - (len1+1); + } + if (len2 > 0) { + memcpy(d+len1, s, len2); + d[len1+len2] = 0; + } + return ret; +} +#endif + +#ifndef HAVE_MKTIME +/******************************************************************* +a mktime() replacement for those who don't have it - contributed by +C.A. 
Lademann <cal@zls.com> +Corrections by richard.kettlewell@kewill.com +********************************************************************/ + +#define MINUTE 60 +#define HOUR 60*MINUTE +#define DAY 24*HOUR +#define YEAR 365*DAY +time_t rep_mktime(struct tm *t) +{ + struct tm *u; + time_t epoch = 0; + int n; + int mon [] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }, + y, m, i; + + if(t->tm_year < 70) + return((time_t)-1); + + n = t->tm_year + 1900 - 1; + epoch = (t->tm_year - 70) * YEAR + + ((n / 4 - n / 100 + n / 400) - (1969 / 4 - 1969 / 100 + 1969 / 400)) * DAY; + + y = t->tm_year + 1900; + m = 0; + + for(i = 0; i < t->tm_mon; i++) { + epoch += mon [m] * DAY; + if(m == 1 && y % 4 == 0 && (y % 100 != 0 || y % 400 == 0)) + epoch += DAY; + + if(++m > 11) { + m = 0; + y++; + } + } + + epoch += (t->tm_mday - 1) * DAY; + epoch += t->tm_hour * HOUR + t->tm_min * MINUTE + t->tm_sec; + + if((u = localtime(&epoch)) != NULL) { + t->tm_sec = u->tm_sec; + t->tm_min = u->tm_min; + t->tm_hour = u->tm_hour; + t->tm_mday = u->tm_mday; + t->tm_mon = u->tm_mon; + t->tm_year = u->tm_year; + t->tm_wday = u->tm_wday; + t->tm_yday = u->tm_yday; + t->tm_isdst = u->tm_isdst; + } + + return(epoch); +} +#endif /* !HAVE_MKTIME */ + + +#ifndef HAVE_INITGROUPS +/**************************************************************************** + some systems don't have an initgroups call +****************************************************************************/ +int rep_initgroups(char *name, gid_t id) +{ +#ifndef HAVE_SETGROUPS + /* yikes! no SETGROUPS or INITGROUPS? how can this work? */ + errno = ENOSYS; + return -1; +#else /* HAVE_SETGROUPS */ + +#include <grp.h> + + gid_t *grouplst = NULL; + int max_gr = NGROUPS_MAX; + int ret; + int i,j; + struct group *g; + char *gr; + + if((grouplst = malloc(sizeof(gid_t) * max_gr)) == NULL) { + errno = ENOMEM; + return -1; + } + + grouplst[0] = id; + i = 1; + while (i < max_gr && ((g = (struct group *)getgrent()) != (struct group *)NULL)) { + if (g->gr_gid == id) + continue; + j = 0; + gr = g->gr_mem[0]; + while (gr && (*gr != (char)NULL)) { + if (strcmp(name,gr) == 0) { + grouplst[i] = g->gr_gid; + i++; + gr = (char *)NULL; + break; + } + gr = g->gr_mem[++j]; + } + } + endgrent(); + ret = setgroups(i, grouplst); + free(grouplst); + return ret; +#endif /* HAVE_SETGROUPS */ +} +#endif /* HAVE_INITGROUPS */ + + +#ifndef HAVE_MEMMOVE +/******************************************************************* +safely copies memory, ensuring no overlap problems. +this is only used if the machine does not have its own memmove(). +this is not the fastest algorithm in town, but it will do for our +needs. 
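+an illustrative overlapping call that this replacement must handle +correctly (editor's sketch, not from the original source): +    char buf[8] = "abcdefg"; +    memmove(buf, buf + 1, 7);   shifts left by one; src and dest overlap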
+********************************************************************/ +void *rep_memmove(void *dest,const void *src,int size) +{ + unsigned long d,s; + int i; + if (dest==src || !size) return(dest); + + d = (unsigned long)dest; + s = (unsigned long)src; + + if ((d >= (s+size)) || (s >= (d+size))) { + /* no overlap */ + memcpy(dest,src,size); + return(dest); + } + + if (d < s) { + /* we can forward copy */ + if (s-d >= sizeof(int) && + !(s%sizeof(int)) && + !(d%sizeof(int)) && + !(size%sizeof(int))) { + /* do it all as words */ + int *idest = (int *)dest; + int *isrc = (int *)src; + size /= sizeof(int); + for (i=0;i<size;i++) idest[i] = isrc[i]; + } else { + /* simplest */ + char *cdest = (char *)dest; + char *csrc = (char *)src; + for (i=0;i<size;i++) cdest[i] = csrc[i]; + } + } else { + /* must backward copy */ + if (d-s >= sizeof(int) && + !(s%sizeof(int)) && + !(d%sizeof(int)) && + !(size%sizeof(int))) { + /* do it all as words */ + int *idest = (int *)dest; + int *isrc = (int *)src; + size /= sizeof(int); + for (i=size-1;i>=0;i--) idest[i] = isrc[i]; + } else { + /* simplest */ + char *cdest = (char *)dest; + char *csrc = (char *)src; + for (i=size-1;i>=0;i--) cdest[i] = csrc[i]; + } + } + return(dest); +} +#endif /* HAVE_MEMMOVE */ + +#ifndef HAVE_STRDUP +/**************************************************************************** +duplicate a string +****************************************************************************/ +char *rep_strdup(const char *s) +{ + size_t len; + char *ret; + + if (!s) return(NULL); + + len = strlen(s)+1; + ret = (char *)malloc(len); + if (!ret) return(NULL); + memcpy(ret,s,len); + return(ret); +} +#endif /* HAVE_STRDUP */ + +#ifndef HAVE_SETLINEBUF +void rep_setlinebuf(FILE *stream) +{ + setvbuf(stream, (char *)NULL, _IOLBF, 0); +} +#endif /* HAVE_SETLINEBUF */ + +#ifndef HAVE_VSYSLOG +#ifdef HAVE_SYSLOG +void rep_vsyslog (int facility_priority, const char *format, va_list arglist) +{ + char *msg = NULL; + vasprintf(&msg, format, arglist); + if (!msg) + return; + syslog(facility_priority, "%s", msg); + free(msg); +} +#endif /* HAVE_SYSLOG */ +#endif /* HAVE_VSYSLOG */ + +#ifndef HAVE_STRNLEN +/** + Some platforms don't have strnlen +**/ + size_t rep_strnlen(const char *s, size_t max) +{ + size_t len; + + for (len = 0; len < max; len++) { + if (s[len] == '\0') { + break; + } + } + return len; +} +#endif + +#ifndef HAVE_STRNDUP +/** + Some platforms don't have strndup. +**/ +char *rep_strndup(const char *s, size_t n) +{ + char *ret; + + n = strnlen(s, n); + ret = malloc(n+1); + if (!ret) + return NULL; + memcpy(ret, s, n); + ret[n] = 0; + + return ret; +} +#endif + +#if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4) +int rep_waitpid(pid_t pid,int *status,int options) +{ + return wait4(pid, status, options, NULL); +} +#endif + +#ifndef HAVE_SETEUID +int rep_seteuid(uid_t euid) +{ +#ifdef HAVE_SETRESUID + return setresuid(-1, euid, -1); +#else + errno = ENOSYS; + return -1; +#endif +} +#endif + +#ifndef HAVE_SETEGID +int rep_setegid(gid_t egid) +{ +#ifdef HAVE_SETRESGID + return setresgid(-1, egid, -1); +#else + errno = ENOSYS; + return -1; +#endif +} +#endif + +/******************************************************************* +os/2 also doesn't have chroot +********************************************************************/ +#ifndef HAVE_CHROOT +int rep_chroot(const char *dname) +{ + errno = ENOSYS; + return -1; +} +#endif + +/***************************************************************** + Possibly replace mkstemp if it is broken. 
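+ A hedged usage sketch (illustrative only): the template must be a + writable array ending in "XXXXXX", e.g. +     char tmpl[] = "/tmp/smbXXXXXX"; +     int fd = mkstemp(tmpl);   tmpl is rewritten with the chosen name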
+*****************************************************************/ + +#ifndef HAVE_SECURE_MKSTEMP +int rep_mkstemp(char *template) +{ + /* have a reasonable go at emulating it. Hope that + the system mktemp() isn't completely hopeless */ + mktemp(template); + if (template[0] == 0) + return -1; + return open(template, O_CREAT|O_EXCL|O_RDWR, 0600); +} +#endif + +#ifndef HAVE_MKDTEMP +char *rep_mkdtemp(char *template) +{ + char *dname; + + if ((dname = mktemp(template))) { + if (mkdir(dname, 0700) >= 0) { + return dname; + } + } + + return NULL; +} +#endif + +/***************************************************************** + Watch out: this is not thread safe. +*****************************************************************/ + +#ifndef HAVE_PREAD +ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset) +{ + if (lseek(__fd, __offset, SEEK_SET) != __offset) { + return -1; + } + return read(__fd, __buf, __nbytes); +} +#endif + +/***************************************************************** + Watch out: this is not thread safe. +*****************************************************************/ + +#ifndef HAVE_PWRITE +ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset) +{ + if (lseek(__fd, __offset, SEEK_SET) != __offset) { + return -1; + } + return write(__fd, __buf, __nbytes); +} +#endif + +#ifndef HAVE_STRCASESTR +char *rep_strcasestr(const char *haystack, const char *needle) +{ + const char *s; + size_t nlen = strlen(needle); + for (s=haystack;*s;s++) { + if (toupper(*needle) == toupper(*s) && + strncasecmp(s, needle, nlen) == 0) { + return (char *)((uintptr_t)s); + } + } + return NULL; +} +#endif + +#ifndef HAVE_STRSEP +char *rep_strsep(char **pps, const char *delim) +{ + char *ret = *pps; + char *p = *pps; + + if (p == NULL) { + return NULL; + } + p += strcspn(p, delim); + if (*p == '\0') { + *pps = NULL; + } else { + *p = '\0'; + *pps = p + 1; + } + return ret; +} +#endif + +#ifndef HAVE_STRTOK_R +/* based on GLIBC version, copyright Free Software Foundation */ +char *rep_strtok_r(char *s, const char *delim, char **save_ptr) +{ + char *token; + + if (s == NULL) s = *save_ptr; + + s += strspn(s, delim); + if (*s == '\0') { + *save_ptr = s; + return NULL; + } + + token = s; + s = strpbrk(token, delim); + if (s == NULL) { + *save_ptr = token + strlen(token); + } else { + *s = '\0'; + *save_ptr = s + 1; + } + + return token; +} +#endif + + +#ifndef HAVE_STRTOLL +long long int rep_strtoll(const char *str, char **endptr, int base) +{ +#ifdef HAVE_STRTOQ + return strtoq(str, endptr, base); +#elif defined(HAVE___STRTOLL) + return __strtoll(str, endptr, base); +#elif SIZEOF_LONG == SIZEOF_LONG_LONG + return (long long int) strtol(str, endptr, base); +#else +# error "You need a strtoll function" +#endif +} +#else +#ifdef HAVE_BSD_STRTOLL +#undef strtoll +long long int rep_strtoll(const char *str, char **endptr, int base) +{ + int saved_errno = errno; + long long int nb = strtoll(str, endptr, base); + /* With glibc EINVAL is only returned if base is not ok */ + if (errno == EINVAL) { + if (base == 0 || (base >1 && base <37)) { + /* Base was ok so it's because we were not + * able to make the conversion. + * Let's reset errno. 
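+ * For example (illustrative of the BSD behaviour this + * guards against): strtoll("abc", NULL, 10) may set + * EINVAL even though base 10 is perfectly valid.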
+ */ + errno = saved_errno; + } + } + return nb; +} +#endif /* HAVE_BSD_STRTOLL */ +#endif /* HAVE_STRTOLL */ + + +#ifndef HAVE_STRTOULL +unsigned long long int rep_strtoull(const char *str, char **endptr, int base) +{ +#ifdef HAVE_STRTOUQ + return strtouq(str, endptr, base); +#elif defined(HAVE___STRTOULL) + return __strtoull(str, endptr, base); +#elif SIZEOF_LONG == SIZEOF_LONG_LONG + return (unsigned long long int) strtoul(str, endptr, base); +#else +# error "You need a strtoull function" +#endif +} +#else +#ifdef HAVE_BSD_STRTOLL +#undef strtoull +unsigned long long int rep_strtoull(const char *str, char **endptr, int base) +{ + int saved_errno = errno; + unsigned long long int nb = strtoull(str, endptr, base); + /* With glibc EINVAL is only returned if base is not ok */ + if (errno == EINVAL) { + if (base == 0 || (base >1 && base <37)) { + /* Base was ok so it's because we were not + * able to make the conversion. + * Let's reset errno. + */ + errno = saved_errno; + } + } + return nb; +} +#endif /* HAVE_BSD_STRTOLL */ +#endif /* HAVE_STRTOULL */ + +#ifndef HAVE_SETENV +int rep_setenv(const char *name, const char *value, int overwrite) +{ + char *p; + size_t l1, l2; + int ret; + + if (!overwrite && getenv(name)) { + return 0; + } + + l1 = strlen(name); + l2 = strlen(value); + + p = malloc(l1+l2+2); + if (p == NULL) { + return -1; + } + memcpy(p, name, l1); + p[l1] = '='; + memcpy(p+l1+1, value, l2); + p[l1+l2+1] = 0; + + ret = putenv(p); + if (ret != 0) { + free(p); + } + + return ret; +} +#endif + +#ifndef HAVE_UNSETENV +int rep_unsetenv(const char *name) +{ + extern char **environ; + size_t len = strlen(name); + size_t i, count; + + if (environ == NULL || getenv(name) == NULL) { + return 0; + } + + for (i=0;environ[i];i++) /* noop */ ; + + count=i; + + for (i=0;i<count;) { + if (strncmp(environ[i], name, len) == 0 && environ[i][len] == '=') { + /* note: we do _not_ free the old variable here. It is unsafe to + do so, as the pointer may not have come from malloc */ + memmove(&environ[i], &environ[i+1], (count-i)*sizeof(char *)); + count--; + } else { + i++; + } + } + + return 0; +} +#endif + +#ifndef HAVE_UTIME +int rep_utime(const char *filename, const struct utimbuf *buf) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_UTIMES +int rep_utimes(const char *filename, const struct timeval tv[2]) +{ + struct utimbuf u; + + u.actime = tv[0].tv_sec; + if (tv[0].tv_usec > 500000) { + u.actime += 1; + } + + u.modtime = tv[1].tv_sec; + if (tv[1].tv_usec > 500000) { + u.modtime += 1; + } + + return utime(filename, &u); +} +#endif + +#ifndef HAVE_DUP2 +int rep_dup2(int oldfd, int newfd) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_CHOWN +/** +chown isn't used much but OS/2 doesn't have it +**/ +int rep_chown(const char *fname, uid_t uid, gid_t gid) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_LINK +int rep_link(const char *oldpath, const char *newpath) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_READLINK +int rep_readlink(const char *path, char *buf, size_t bufsiz) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_SYMLINK +int rep_symlink(const char *oldpath, const char *newpath) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_LCHOWN +int rep_lchown(const char *fname,uid_t uid,gid_t gid) +{ + errno = ENOSYS; + return -1; +} +#endif + +#ifndef HAVE_REALPATH +char *rep_realpath(const char *path, char *resolved_path) +{ + /* As realpath is not a system call we can't return ENOSYS. 
*/ + errno = EINVAL; + return NULL; +} +#endif + + +#ifndef HAVE_MEMMEM +void *rep_memmem(const void *haystack, size_t haystacklen, + const void *needle, size_t needlelen) +{ + if (needlelen == 0) { + return discard_const(haystack); + } + while (haystacklen >= needlelen) { + char *p = (char *)memchr(haystack, *(const char *)needle, + haystacklen-(needlelen-1)); + if (!p) return NULL; + if (memcmp(p, needle, needlelen) == 0) { + return p; + } + /* shrink the window before advancing haystack, otherwise + * the remaining length never decreases */ + haystacklen -= (p - (const char *)haystack) + 1; + haystack = p+1; + } + return NULL; +} +#endif + +#if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) +int rep_vdprintf(int fd, const char *format, va_list ap) +{ + char *s = NULL; + int ret; + + vasprintf(&s, format, ap); + if (s == NULL) { + errno = ENOMEM; + return -1; + } + ret = write(fd, s, strlen(s)); + free(s); + return ret; +} +#endif + +#if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) +int rep_dprintf(int fd, const char *format, ...) +{ + int ret; + va_list ap; + + va_start(ap, format); + ret = vdprintf(fd, format, ap); + va_end(ap); + + return ret; +} +#endif + +#ifndef HAVE_GET_CURRENT_DIR_NAME +char *rep_get_current_dir_name(void) +{ + char buf[PATH_MAX+1]; + char *p; + p = getcwd(buf, sizeof(buf)); + if (p == NULL) { + return NULL; + } + return strdup(p); +} +#endif + +#ifndef HAVE_STRERROR_R +int rep_strerror_r(int errnum, char *buf, size_t buflen) +{ + char *s = strerror(errnum); + if (strlen(s)+1 > buflen) { + errno = ERANGE; + return -1; + } + strncpy(buf, s, buflen); + return 0; +} +#elif (!defined(STRERROR_R_XSI_NOT_GNU)) +#undef strerror_r +int rep_strerror_r(int errnum, char *buf, size_t buflen) +{ + char *s = strerror_r(errnum, buf, buflen); + if (s == NULL) { + /* Shouldn't happen, should always get a string */ + return EINVAL; + } + if (s != buf) { + strlcpy(buf, s, buflen); + if (strlen(s) > buflen - 1) { + return ERANGE; + } + } + return 0; + +} +#endif + +#ifndef HAVE_CLOCK_GETTIME +int rep_clock_gettime(clockid_t clk_id, struct timespec *tp) +{ + struct timeval tval; + switch (clk_id) { + case 0: /* CLOCK_REALTIME :*/ +#if defined(HAVE_GETTIMEOFDAY_TZ) || defined(HAVE_GETTIMEOFDAY_TZ_VOID) + gettimeofday(&tval,NULL); +#else + gettimeofday(&tval); +#endif + tp->tv_sec = tval.tv_sec; + tp->tv_nsec = tval.tv_usec * 1000; + break; + default: + errno = EINVAL; + return -1; + } + return 0; +} +#endif + +#ifndef HAVE_MEMALIGN +void *rep_memalign( size_t align, size_t size ) +{ +#if defined(HAVE_POSIX_MEMALIGN) + void *p = NULL; + int ret = posix_memalign( &p, align, size ); + if ( ret == 0 ) + return p; + + return NULL; +#else + /* On *BSD systems memalign doesn't exist, but memory will + * be aligned on allocations of > pagesize.
*/ +#if defined(SYSCONF_SC_PAGESIZE) + size_t pagesize = (size_t)sysconf(_SC_PAGESIZE); +#elif defined(HAVE_GETPAGESIZE) + size_t pagesize = (size_t)getpagesize(); +#else + size_t pagesize = (size_t)-1; +#endif + if (pagesize == (size_t)-1) { + errno = ENOSYS; + return NULL; + } + if (size < pagesize) { + size = pagesize; + } + return malloc(size); +#endif +} +#endif + +#ifndef HAVE_GETPEEREID +int rep_getpeereid(int s, uid_t *uid, gid_t *gid) +{ +#if defined(HAVE_PEERCRED) + struct ucred cred; + socklen_t cred_len = sizeof(struct ucred); + int ret; + +#undef getsockopt + ret = getsockopt(s, SOL_SOCKET, SO_PEERCRED, (void *)&cred, &cred_len); + if (ret != 0) { + return -1; + } + + if (cred_len != sizeof(struct ucred)) { + errno = EINVAL; + return -1; + } + + *uid = cred.uid; + *gid = cred.gid; + return 0; +#else + errno = ENOSYS; + return -1; +#endif +} +#endif + +#ifndef HAVE_USLEEP +int rep_usleep(useconds_t usecs) +{ + struct timeval tval; + /* + * Fake it with select; usecs is in microseconds. + */ + tval.tv_sec = usecs / 1000000; + tval.tv_usec = usecs % 1000000; + select(0,NULL,NULL,NULL,&tval); + return 0; +} +#endif /* HAVE_USLEEP */ + +#ifndef HAVE_SETPROCTITLE +void rep_setproctitle(const char *fmt, ...) +{ +} +#endif +#ifndef HAVE_SETPROCTITLE_INIT +void rep_setproctitle_init(int argc, char *argv[], char *envp[]) +{ +} +#endif + +#ifndef HAVE_MEMSET_S +# ifndef RSIZE_MAX +# define RSIZE_MAX (SIZE_MAX >> 1) +# endif + +int rep_memset_s(void *dest, size_t destsz, int ch, size_t count) +{ + if (dest == NULL) { + return EINVAL; + } + + if (destsz > RSIZE_MAX || + count > RSIZE_MAX || + count > destsz) { + return ERANGE; + } + +#if defined(HAVE_MEMSET_EXPLICIT) + /* C23 memset_explicit() takes (dest, ch, count) */ + memset_explicit(dest, ch, count); +#else /* HAVE_MEMSET_EXPLICIT */ + memset(dest, ch, count); +# if defined(HAVE_GCC_VOLATILE_MEMORY_PROTECTION) + /* See http://llvm.org/bugs/show_bug.cgi?id=15495 */ + __asm__ volatile("" : : "g"(dest) : "memory"); +# endif /* HAVE_GCC_VOLATILE_MEMORY_PROTECTION */ +#endif /* HAVE_MEMSET_EXPLICIT */ + + return 0; +} +#endif /* HAVE_MEMSET_S */ + +#ifndef HAVE_GETPROGNAME +# ifndef HAVE_PROGRAM_INVOCATION_SHORT_NAME +# define PROGNAME_SIZE 32 +static char rep_progname[PROGNAME_SIZE]; +# endif /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ + +const char *rep_getprogname(void) +{ +#ifdef HAVE_PROGRAM_INVOCATION_SHORT_NAME + return program_invocation_short_name; +#else /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ + FILE *fp = NULL; + char cmdline[4096] = {0}; + char *p = NULL; + pid_t pid; + size_t nread; + int len; + int rc; + + if (rep_progname[0] != '\0') { + return rep_progname; + } + + len = snprintf(rep_progname, sizeof(rep_progname), "%s", "<unknown>"); + if (len <= 0) { + return NULL; + } + + pid = getpid(); + if (pid <= 1 || pid == (pid_t)-1) { + return NULL; + } + + len = snprintf(cmdline, + sizeof(cmdline), + "/proc/%u/cmdline", + (unsigned int)pid); + if (len <= 0 || len == sizeof(cmdline)) { + return NULL; + } + + fp = fopen(cmdline, "r"); + if (fp == NULL) { + return NULL; + } + + nread = fread(cmdline, 1, sizeof(cmdline) - 1, fp); + + rc = fclose(fp); + if (rc != 0) { + return NULL; + } + + if (nread == 0) { + return NULL; + } + + cmdline[nread] = '\0'; + + p = strrchr(cmdline, '/'); + if (p != NULL) { + p++; + } else { + p = cmdline; + } + + len = strlen(p); + if (len > PROGNAME_SIZE) { + p[PROGNAME_SIZE - 1] = '\0'; + } + + (void)snprintf(rep_progname, sizeof(rep_progname), "%s", p); + + return rep_progname; +#endif /* HAVE_PROGRAM_INVOCATION_SHORT_NAME */ +} +#endif /* HAVE_GETPROGNAME */ + +#ifndef HAVE_COPY_FILE_RANGE
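+/* + * A hedged usage sketch (editor's addition, not from the original file): + * passing NULL offset pointers makes copy_file_range() use and advance + * each descriptor's own file offset; src_fd and dst_fd are assumed + * open descriptors, for illustration: + * + *     ssize_t n = copy_file_range(src_fd, NULL, dst_fd, NULL, 4096, 0); + *     if (n == -1 && errno == ENOSYS) { + *         (fall back to a plain read/write loop) + *     } + */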
+ssize_t rep_copy_file_range(int fd_in, + loff_t *off_in, + int fd_out, + loff_t *off_out, + size_t len, + unsigned int flags) +{ +# ifdef HAVE_SYSCALL_COPY_FILE_RANGE + return syscall(__NR_copy_file_range, + fd_in, + off_in, + fd_out, + off_out, + len, + flags); +# endif /* HAVE_SYSCALL_COPY_FILE_RANGE */ + errno = ENOSYS; + return -1; +} +#endif /* HAVE_COPY_FILE_RANGE */ + +#ifndef HAVE_OPENAT2 + +/* fall back to well-known __NR_openat2 values */ +#ifndef __NR_openat2 +# if defined(LINUX) && defined(HAVE_SYS_SYSCALL_H) +# if defined(__i386__) +# define __NR_openat2 437 +# elif defined(__x86_64__) && defined(__LP64__) +# define __NR_openat2 437 /* 437 0x1B5 */ +# elif defined(__x86_64__) && defined(__ILP32__) +# define __NR_openat2 1073742261 /* 1073742261 0x400001B5 */ +# elif defined(__aarch64__) +# define __NR_openat2 437 +# elif defined(__arm__) +# define __NR_openat2 437 +# elif defined(__sparc__) +# define __NR_openat2 437 +# endif +# endif /* defined(LINUX) && defined(HAVE_SYS_SYSCALL_H) */ +#endif /* !__NR_openat2 */ + +#ifdef DISABLE_OPATH +/* + * systems without O_PATH also don't have openat2, + * so make sure we end up with a realistic combination. + */ +#undef __NR_openat2 +#endif /* DISABLE_OPATH */ + +long rep_openat2(int dirfd, const char *pathname, + struct open_how *how, size_t size) +{ +#ifdef __NR_openat2 +#if _FILE_OFFSET_BITS == 64 && SIZE_MAX == 0xffffffffUL && defined(O_LARGEFILE) + struct open_how __how; + +#if defined(O_PATH) && ! defined(DISABLE_OPATH) + if ((how->flags & O_PATH) == 0) +#endif + { + if (sizeof(__how) == size) { + __how = *how; + + __how.flags |= O_LARGEFILE; + how = &__how; + } + } +#endif + + return syscall(__NR_openat2, + dirfd, + pathname, + how, + size); +#else + errno = ENOSYS; + return -1; +#endif +} +#endif /* !HAVE_OPENAT2 */ diff --git a/lib/replace/replace.h b/lib/replace/replace.h new file mode 100644 index 0000000..a6a2b40 --- /dev/null +++ b/lib/replace/replace.h @@ -0,0 +1,1112 @@ +/* + Unix SMB/CIFS implementation. + + macros to go along with the lib/replace/ portability layer code + + Copyright (C) Andrew Tridgell 2005 + Copyright (C) Jelmer Vernooij 2006-2008 + Copyright (C) Jeremy Allison 2007. + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/ + +#ifndef _LIBREPLACE_REPLACE_H +#define _LIBREPLACE_REPLACE_H + +#ifndef NO_CONFIG_H +#include "config.h" +#endif + +#ifdef HAVE_STANDARDS_H +#include <standards.h> +#endif + +/* + * Needs to be defined before std*.h and string*.h are included + * As it's also needed when Python.h is the first header we + * require a global -D__STDC_WANT_LIB_EXT1__=1 + */ +#if __STDC_WANT_LIB_EXT1__ != 1 +#error -D__STDC_WANT_LIB_EXT1__=1 required +#endif + +#include <stdio.h> +#include <stdlib.h> +#include <stdarg.h> +#include <errno.h> + +#ifndef HAVE_DECL_EWOULDBLOCK +#define EWOULDBLOCK EAGAIN +#endif + +#if defined(_MSC_VER) || defined(__MINGW32__) +#include "win32_replace.h" +#endif + + +#ifdef HAVE_INTTYPES_H +#define __STDC_FORMAT_MACROS +#include <inttypes.h> +#elif defined(HAVE_STDINT_H) +#include <stdint.h> +/* force off HAVE_INTTYPES_H so that roken doesn't try to include both, + which causes a warning storm on irix */ +#undef HAVE_INTTYPES_H +#endif + +#ifdef HAVE_MALLOC_H +#include <malloc.h> +#endif + +#ifndef __PRI64_PREFIX +# if __WORDSIZE == 64 && ! defined __APPLE__ +# define __PRI64_PREFIX "l" +# else +# define __PRI64_PREFIX "ll" +# endif +#endif + +/* Decimal notation. */ +#ifndef PRId8 +# define PRId8 "d" +#endif +#ifndef PRId16 +# define PRId16 "d" +#endif +#ifndef PRId32 +# define PRId32 "d" +#endif +#ifndef PRId64 +# define PRId64 __PRI64_PREFIX "d" +#endif + +#ifndef PRIi8 +# define PRIi8 "i" +#endif +#ifndef PRIi16 +# define PRIi16 "i" +#endif +#ifndef PRIi32 +# define PRIi32 "i" +#endif +#ifndef PRIi64 +# define PRIi64 __PRI64_PREFIX "i" +#endif + +#ifndef PRIu8 +# define PRIu8 "u" +#endif +#ifndef PRIu16 +# define PRIu16 "u" +#endif +#ifndef PRIu32 +# define PRIu32 "u" +#endif +#ifndef PRIu64 +# define PRIu64 __PRI64_PREFIX "u" +#endif + +#ifndef SCNd8 +# define SCNd8 "hhd" +#endif +#ifndef SCNd16 +# define SCNd16 "hd" +#endif +#ifndef SCNd32 +# define SCNd32 "d" +#endif +#ifndef SCNd64 +# define SCNd64 __PRI64_PREFIX "d" +#endif + +#ifndef SCNi8 +# define SCNi8 "hhi" +#endif +#ifndef SCNi16 +# define SCNi16 "hi" +#endif +#ifndef SCNi32 +# define SCNi32 "i" +#endif +#ifndef SCNi64 +# define SCNi64 __PRI64_PREFIX "i" +#endif + +#ifndef SCNu8 +# define SCNu8 "hhu" +#endif +#ifndef SCNu16 +# define SCNu16 "hu" +#endif +#ifndef SCNu32 +# define SCNu32 "u" +#endif +#ifndef SCNu64 +# define SCNu64 __PRI64_PREFIX "u" +#endif + +#ifdef HAVE_BSD_STRING_H +#include <bsd/string.h> +#endif + +#ifdef HAVE_BSD_UNISTD_H +#include <bsd/unistd.h> +#endif + +#ifdef HAVE_UNISTD_H +#include <unistd.h> +#endif + +#ifdef HAVE_STRING_H +#include <string.h> +#endif + +#ifdef HAVE_STRINGS_H +#include <strings.h> +#endif + +#ifdef HAVE_SYS_TYPES_H +#include <sys/types.h> +#endif + +#ifdef HAVE_SYS_SYSMACROS_H +#include <sys/sysmacros.h> +#endif + +#ifdef HAVE_SETPROCTITLE_H +#include <setproctitle.h> +#endif + +#if STDC_HEADERS +#include <stdlib.h> +#include <stddef.h> +#endif + +#ifdef HAVE_LINUX_TYPES_H +/* + * This is needed as some broken header files require this to be included early + */ +#include <linux/types.h> +#endif + +#ifndef HAVE_STRERROR +extern const char *const sys_errlist[]; +#define strerror(i) sys_errlist[i] +#endif + +#ifndef HAVE_ERRNO_DECL +extern int errno; +#endif + +#ifndef HAVE_STRDUP +#define strdup rep_strdup +char *rep_strdup(const char *s); +#endif + +#ifndef HAVE_MEMMOVE +#define memmove rep_memmove +void *rep_memmove(void *dest,const void *src,int size); +#endif + +#ifndef HAVE_MEMMEM +#define memmem rep_memmem +void *rep_memmem(const void *haystack, size_t 
haystacklen, + const void *needle, size_t needlelen); +#endif + +#ifndef HAVE_MEMALIGN +#define memalign rep_memalign +void *rep_memalign(size_t boundary, size_t size); +#endif + +#ifndef HAVE_MKTIME +#define mktime rep_mktime +/* prototype is in "system/time.h" */ +#endif + +#ifndef HAVE_TIMEGM +#define timegm rep_timegm +/* prototype is in "system/time.h" */ +#endif + +#ifndef HAVE_UTIME +#define utime rep_utime +/* prototype is in "system/time.h" */ +#endif + +#ifndef HAVE_UTIMES +#define utimes rep_utimes +/* prototype is in "system/time.h" */ +#endif + +#ifndef HAVE_STRLCPY +#define strlcpy rep_strlcpy +size_t rep_strlcpy(char *d, const char *s, size_t bufsize); +#endif + +#ifndef HAVE_STRLCAT +#define strlcat rep_strlcat +size_t rep_strlcat(char *d, const char *s, size_t bufsize); +#endif + +#ifndef HAVE_CLOSEFROM +#define closefrom rep_closefrom +int rep_closefrom(int lower); +#endif + + +#if (defined(BROKEN_STRNDUP) || !defined(HAVE_STRNDUP)) +#undef HAVE_STRNDUP +#define strndup rep_strndup +char *rep_strndup(const char *s, size_t n); +#endif + +#if (defined(BROKEN_STRNLEN) || !defined(HAVE_STRNLEN)) +#undef HAVE_STRNLEN +#define strnlen rep_strnlen +size_t rep_strnlen(const char *s, size_t n); +#endif + +#if !defined(HAVE_DECL_ENVIRON) +# ifdef __APPLE__ +# include <crt_externs.h> +# define environ (*_NSGetEnviron()) +# else /* __APPLE__ */ +extern char **environ; +# endif /* __APPLE */ +#endif /* !defined(HAVE_DECL_ENVIRON) */ + +#ifndef HAVE_SETENV +#define setenv rep_setenv +int rep_setenv(const char *name, const char *value, int overwrite); +#else +#ifndef HAVE_SETENV_DECL +int setenv(const char *name, const char *value, int overwrite); +#endif +#endif + +#ifndef HAVE_UNSETENV +#define unsetenv rep_unsetenv +int rep_unsetenv(const char *name); +#endif + +#ifndef HAVE_SETEUID +#define seteuid rep_seteuid +int rep_seteuid(uid_t); +#endif + +#ifndef HAVE_SETEGID +#define setegid rep_setegid +int rep_setegid(gid_t); +#endif + +#if (defined(USE_SETRESUID) && !defined(HAVE_SETRESUID_DECL)) +/* stupid glibc */ +int setresuid(uid_t ruid, uid_t euid, uid_t suid); +#endif +#if (defined(USE_SETRESUID) && !defined(HAVE_SETRESGID_DECL)) +int setresgid(gid_t rgid, gid_t egid, gid_t sgid); +#endif + +#ifndef HAVE_CHOWN +#define chown rep_chown +int rep_chown(const char *path, uid_t uid, gid_t gid); +#endif + +#ifndef HAVE_CHROOT +#define chroot rep_chroot +int rep_chroot(const char *dirname); +#endif + +#ifndef HAVE_LINK +#define link rep_link +int rep_link(const char *oldpath, const char *newpath); +#endif + +#ifndef HAVE_READLINK +#define readlink rep_readlink +ssize_t rep_readlink(const char *path, char *buf, size_t bufsize); +#endif + +#ifndef HAVE_SYMLINK +#define symlink rep_symlink +int rep_symlink(const char *oldpath, const char *newpath); +#endif + +#ifndef HAVE_REALPATH +#define realpath rep_realpath +char *rep_realpath(const char *path, char *resolved_path); +#endif + +#ifndef HAVE_LCHOWN +#define lchown rep_lchown +int rep_lchown(const char *fname,uid_t uid,gid_t gid); +#endif + +#ifdef HAVE_UNIX_H +#include <unix.h> +#endif + +#ifndef HAVE_SETLINEBUF +#define setlinebuf rep_setlinebuf +void rep_setlinebuf(FILE *); +#endif + +#ifndef HAVE_STRCASESTR +#define strcasestr rep_strcasestr +char *rep_strcasestr(const char *haystack, const char *needle); +#endif + +#ifndef HAVE_STRSEP +#define strsep rep_strsep +char *rep_strsep(char **pps, const char *delim); +#endif + +#ifndef HAVE_STRTOK_R +#define strtok_r rep_strtok_r +char *rep_strtok_r(char *s, const char *delim, char 
**save_ptr); +#endif + + + +#ifndef HAVE_STRTOLL +#define strtoll rep_strtoll +long long int rep_strtoll(const char *str, char **endptr, int base); +#else +#ifdef HAVE_BSD_STRTOLL +#define strtoll rep_strtoll +long long int rep_strtoll(const char *str, char **endptr, int base); +#endif +#endif + +#ifndef HAVE_STRTOULL +#define strtoull rep_strtoull +unsigned long long int rep_strtoull(const char *str, char **endptr, int base); +#else +#ifdef HAVE_BSD_STRTOLL /* yes, it's not HAVE_BSD_STRTOULL */ +#define strtoull rep_strtoull +unsigned long long int rep_strtoull(const char *str, char **endptr, int base); +#endif +#endif + +#ifndef HAVE_FTRUNCATE +#define ftruncate rep_ftruncate +int rep_ftruncate(int,off_t); +#endif + +#ifndef HAVE_INITGROUPS +#define initgroups rep_initgroups +int rep_initgroups(char *name, gid_t id); +#endif + +#if !defined(HAVE_BZERO) && defined(HAVE_MEMSET) +#define bzero(a,b) memset((a),'\0',(b)) +#endif + +#ifndef HAVE_DLERROR +#define dlerror rep_dlerror +char *rep_dlerror(void); +#endif + +#ifndef HAVE_DLOPEN +#define dlopen rep_dlopen +#ifdef DLOPEN_TAKES_UNSIGNED_FLAGS +void *rep_dlopen(const char *name, unsigned int flags); +#else +void *rep_dlopen(const char *name, int flags); +#endif +#endif + +#ifndef HAVE_DLSYM +#define dlsym rep_dlsym +void *rep_dlsym(void *handle, const char *symbol); +#endif + +#ifndef HAVE_DLCLOSE +#define dlclose rep_dlclose +int rep_dlclose(void *handle); +#endif + +#ifndef HAVE_SOCKETPAIR +#define socketpair rep_socketpair +/* prototype is in system/network.h */ +#endif + +/* for old gcc releases that don't have the feature test macro __has_attribute */ +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +#ifndef PRINTF_ATTRIBUTE +#if __has_attribute(format) || (__GNUC__ >= 3) +/** Use gcc attribute to check printf fns. a1 is the 1-based index of + * the parameter containing the format, and a2 the index of the first + * argument. Note that some gcc 2.x versions don't handle this + * properly **/ +#define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2))) +#else +#define PRINTF_ATTRIBUTE(a1, a2) +#endif +#endif + +#ifndef _DEPRECATED_ +#if __has_attribute(deprecated) || (__GNUC__ >= 3) +#define _DEPRECATED_ __attribute__ ((deprecated)) +#else +#define _DEPRECATED_ +#endif +#endif + +#if !defined(HAVE_VDPRINTF) || !defined(HAVE_C99_VSNPRINTF) +#define vdprintf rep_vdprintf +int rep_vdprintf(int fd, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); +#endif + +#if !defined(HAVE_DPRINTF) || !defined(HAVE_C99_VSNPRINTF) +#define dprintf rep_dprintf +int rep_dprintf(int fd, const char *format, ...) PRINTF_ATTRIBUTE(2,3); +#endif + +#if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) +#define vasprintf rep_vasprintf +int rep_vasprintf(char **ptr, const char *format, va_list ap) PRINTF_ATTRIBUTE(2,0); +#endif + +#if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) +#define snprintf rep_snprintf +int rep_snprintf(char *,size_t ,const char *, ...) PRINTF_ATTRIBUTE(3,4); +#endif + +#if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) +#define vsnprintf rep_vsnprintf +int rep_vsnprintf(char *,size_t ,const char *, va_list ap) PRINTF_ATTRIBUTE(3,0); +#endif + +#if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) +#define asprintf rep_asprintf +int rep_asprintf(char **,const char *, ...) 
PRINTF_ATTRIBUTE(2,3); +#endif + +#if !defined(HAVE_C99_VSNPRINTF) +#ifdef REPLACE_BROKEN_PRINTF +/* + * We do not redefine printf by default + * as it breaks the build if system headers + * use __attribute__((format(printf, 3, 0))) + * instead of __attribute__((format(__printf__, 3, 0))) + */ +#define printf rep_printf +#endif +int rep_printf(const char *, ...) PRINTF_ATTRIBUTE(1,2); +#endif + +#if !defined(HAVE_C99_VSNPRINTF) +#define fprintf rep_fprintf +int rep_fprintf(FILE *stream, const char *, ...) PRINTF_ATTRIBUTE(2,3); +#endif + +#ifndef HAVE_VSYSLOG +#ifdef HAVE_SYSLOG +#define vsyslog rep_vsyslog +void rep_vsyslog (int facility_priority, const char *format, va_list arglist) PRINTF_ATTRIBUTE(2,0); +#endif +#endif + +/* we used to use these fns, but now we have good replacements + for snprintf and vsnprintf */ +#define slprintf snprintf + + +#ifndef HAVE_VA_COPY +#undef va_copy +#ifdef HAVE___VA_COPY +#define va_copy(dest, src) __va_copy(dest, src) +#else +#define va_copy(dest, src) (dest) = (src) +#endif +#endif + +#ifndef HAVE_VOLATILE +#define volatile +#endif + +#ifndef HAVE_COMPARISON_FN_T +typedef int (*comparison_fn_t)(const void *, const void *); +#endif + +#ifndef HAVE_WORKING_STRPTIME +#define strptime rep_strptime +struct tm; +char *rep_strptime(const char *buf, const char *format, struct tm *tm); +#endif + +#ifndef HAVE_DUP2 +#define dup2 rep_dup2 +int rep_dup2(int oldfd, int newfd); +#endif + +/* Load header file for dynamic linking stuff */ +#ifdef HAVE_DLFCN_H +#include <dlfcn.h> +#endif + +#ifndef RTLD_LAZY +#define RTLD_LAZY 0 +#endif +#ifndef RTLD_NOW +#define RTLD_NOW 0 +#endif +#ifndef RTLD_GLOBAL +#define RTLD_GLOBAL 0 +#endif + +#ifndef HAVE_SECURE_MKSTEMP +#define mkstemp(path) rep_mkstemp(path) +int rep_mkstemp(char *temp); +#endif + +#ifndef HAVE_MKDTEMP +#define mkdtemp rep_mkdtemp +char *rep_mkdtemp(char *template); +#endif + +#ifndef HAVE_PREAD +#define pread rep_pread +ssize_t rep_pread(int __fd, void *__buf, size_t __nbytes, off_t __offset); +#define LIBREPLACE_PREAD_REPLACED 1 +#else +#define LIBREPLACE_PREAD_NOT_REPLACED 1 +#endif + +#ifndef HAVE_PWRITE +#define pwrite rep_pwrite +ssize_t rep_pwrite(int __fd, const void *__buf, size_t __nbytes, off_t __offset); +#define LIBREPLACE_PWRITE_REPLACED 1 +#else +#define LIBREPLACE_PWRITE_NOT_REPLACED 1 +#endif + +#if !defined(HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) +#define inet_ntoa rep_inet_ntoa +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_INET_PTON +#define inet_pton rep_inet_pton +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_INET_NTOP +#define inet_ntop rep_inet_ntop +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_INET_ATON +#define inet_aton rep_inet_aton +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_CONNECT +#define connect rep_connect +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_GETHOSTBYNAME +#define gethostbyname rep_gethostbyname +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_GETIFADDRS +#define getifaddrs rep_getifaddrs +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_FREEIFADDRS +#define freeifaddrs rep_freeifaddrs +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_GET_CURRENT_DIR_NAME +#define get_current_dir_name rep_get_current_dir_name +char *rep_get_current_dir_name(void); +#endif + +#if (!defined(HAVE_STRERROR_R) || !defined(STRERROR_R_XSI_NOT_GNU)) +#define strerror_r rep_strerror_r +int rep_strerror_r(int errnum, char *buf, size_t 
buflen); +#endif + +#if !defined(HAVE_CLOCK_GETTIME) +#define clock_gettime rep_clock_gettime +#endif + +#ifdef HAVE_LIMITS_H +#include <limits.h> +#endif + +#ifdef HAVE_SYS_PARAM_H +#include <sys/param.h> +#endif + +/* The extra casts work around common compiler bugs. */ +#define _TYPE_SIGNED(t) (! ((t) 0 < (t) -1)) +/* The outer cast is needed to work around a bug in Cray C 5.0.3.0. + It is necessary at least when t == time_t. */ +#define _TYPE_MINIMUM(t) ((t) (_TYPE_SIGNED (t) \ + ? ~ (t) 0 << (sizeof (t) * CHAR_BIT - 1) : (t) 0)) +#define _TYPE_MAXIMUM(t) ((t) (~ (t) 0 - _TYPE_MINIMUM (t))) + +#ifndef UINT16_MAX +#define UINT16_MAX 65535 +#endif + +#ifndef UINT32_MAX +#define UINT32_MAX (4294967295U) +#endif + +#ifndef UINT64_MAX +#define UINT64_MAX ((uint64_t)-1) +#endif + +#ifndef INT64_MAX +#define INT64_MAX 9223372036854775807LL +#endif + +#ifndef CHAR_BIT +#define CHAR_BIT 8 +#endif + +#ifndef INT32_MAX +#define INT32_MAX _TYPE_MAXIMUM(int32_t) +#endif + +#ifdef HAVE_STDBOOL_H +#include <stdbool.h> +#endif + +#ifndef HAVE_BOOL +#error Need a real boolean type +#endif + +#if !defined(HAVE_INTPTR_T) +typedef long long intptr_t ; +#define __intptr_t_defined +#endif + +#if !defined(HAVE_UINTPTR_T) +typedef unsigned long long uintptr_t ; +#define __uintptr_t_defined +#endif + +#if !defined(HAVE_PTRDIFF_T) +typedef unsigned long long ptrdiff_t ; +#endif + +/* + * to prevent <rpcsvc/yp_prot.h> from doing a redefine of 'bool' + * + * IRIX, HPUX, MacOS 10 and Solaris need BOOL_DEFINED + * Tru64 needs _BOOL_EXISTS + * AIX needs _BOOL,_TRUE,_FALSE + */ +#ifndef BOOL_DEFINED +#define BOOL_DEFINED +#endif +#ifndef _BOOL_EXISTS +#define _BOOL_EXISTS +#endif +#ifndef _BOOL +#define _BOOL +#endif + +#ifndef __bool_true_false_are_defined +#define __bool_true_false_are_defined +#endif + +#ifndef true +#define true (1) +#endif +#ifndef false +#define false (0) +#endif + +#ifndef _TRUE +#define _TRUE true +#endif +#ifndef _FALSE +#define _FALSE false +#endif + +#ifndef HAVE_FUNCTION_MACRO +#ifdef HAVE_func_MACRO +#define __FUNCTION__ __func__ +#else +#define __FUNCTION__ ("") +#endif +#endif + + +#ifndef MIN +#define MIN(a,b) ((a)<(b)?(a):(b)) +#endif + +#ifndef MAX +#define MAX(a,b) ((a)>(b)?(a):(b)) +#endif + +#if !defined(HAVE_VOLATILE) +#define volatile +#endif + +/** + this is a warning hack. The idea is to use this everywhere that we + get the "discarding const" warning from gcc. That doesn't actually + fix the problem of course, but it means that when we do get to + cleaning them up we can do it by searching the code for + discard_const. + + It also means that other error types aren't as swamped by the noise + of hundreds of const warnings, so we are more likely to notice when + we get new errors. + + Please only add more uses of this macro when you find it + _really_ hard to fix const warnings. Our aim is to eventually use + this function in only a very few places. + + Also, please call this via the discard_const_p() macro interface, as that + makes the return type safe. +*/ +#define discard_const(ptr) ((void *)((uintptr_t)(ptr))) + +/** Type-safe version of discard_const */ +#define discard_const_p(type, ptr) ((type *)discard_const(ptr)) + +#ifndef __STRING +#define __STRING(x) #x +#endif + +#ifndef __STRINGSTRING +#define __STRINGSTRING(x) __STRING(x) +#endif + +#ifndef __LINESTR__ +#define __LINESTR__ __STRINGSTRING(__LINE__) +#endif + +#ifndef __location__ +#define __location__ __FILE__ ":" __LINESTR__ +#endif + +/** + * Zero a structure. 
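+ * A minimal usage sketch (illustrative): pass the structure object + * itself, not a pointer to it: + *     struct sockaddr_in sa; + *     ZERO_STRUCT(sa); + * Given only a pointer p, use ZERO_STRUCTP(p) below instead.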
+ */ +#define ZERO_STRUCT(x) memset_s((char *)&(x), sizeof(x), 0, sizeof(x)) + +/** + * Zero a structure given a pointer to the structure. + */ +#define ZERO_STRUCTP(x) do { \ + if ((x) != NULL) { \ + memset_s((char *)(x), sizeof(*(x)), 0, sizeof(*(x))); \ + } \ +} while(0) + +/** + * Zero a structure given a pointer to the structure - no zero check + */ +#define ZERO_STRUCTPN(x) memset_s((char *)(x), sizeof(*(x)), 0, sizeof(*(x))) + +/** + * Zero an array - note that sizeof(array) must work - ie. it must not be a + * pointer + */ +#define ZERO_ARRAY(x) memset_s((char *)(x), sizeof(x), 0, sizeof(x)) + +/** + * Zero a given len of an array + */ +#define ZERO_ARRAY_LEN(x, l) memset_s((char *)(x), (l), 0, (l)) + +/** + * Explicitly zero data from memory. This is guaranteed to be not optimized + * away. + */ +#define BURN_DATA(x) memset_s((char *)&(x), sizeof(x), 0, sizeof(x)) + +/** + * Explicitly zero data from memory. This is guaranteed to be not optimized + * away. + */ +#define BURN_DATA_SIZE(x, s) memset_s((char *)&(x), (s), 0, (s)) + +/** + * Explicitly zero data from memory. This is guaranteed to be not optimized + * away. + */ +#define BURN_PTR_SIZE(x, s) memset_s((x), (s), 0, (s)) + +/** + * Explicitly zero data in string. This is guaranteed to be not optimized + * away. + */ +#define BURN_STR(x) do { \ + if ((x) != NULL) { \ + size_t s = strlen(x); \ + memset_s((x), s, 0, s); \ + } \ + } while(0) + +/** + * Work out how many elements there are in a static array. + */ +#ifdef ARRAY_SIZE +#undef ARRAY_SIZE +#endif +#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0])) + +/** + * Remove an array element by moving the rest one down + */ +#define ARRAY_DEL_ELEMENT(a,i,n) \ +if((i)<((n)-1)){memmove(&((a)[(i)]),&((a)[(i)+1]),(sizeof(*(a))*((n)-(i)-1)));} + +/** + * Insert an array element by moving the rest one up + * + */ +#define ARRAY_INSERT_ELEMENT(__array,__old_last_idx,__new_elem,__new_idx) do { \ + if ((__new_idx) < (__old_last_idx)) { \ + const void *__src = &((__array)[(__new_idx)]); \ + void *__dst = &((__array)[(__new_idx)+1]); \ + size_t __num = (__old_last_idx)-(__new_idx); \ + size_t __len = sizeof(*(__array)) * __num; \ + memmove(__dst, __src, __len); \ + } \ + (__array)[(__new_idx)] = (__new_elem); \ +} while(0) + +/** + * Pointer difference macro + */ +#define PTR_DIFF(p1,p2) ((ptrdiff_t)(((const char *)(p1)) - (const char *)(p2))) + +#ifdef __COMPAR_FN_T +#define QSORT_CAST (__compar_fn_t) +#endif + +#ifndef QSORT_CAST +#define QSORT_CAST (int (*)(const void *, const void *)) +#endif + +#ifndef PATH_MAX +#define PATH_MAX 1024 +#endif + +#ifndef MAX_DNS_NAME_LENGTH +#define MAX_DNS_NAME_LENGTH 256 /* Actually 255 but +1 for terminating null. */ +#endif + +#ifdef HAVE_CRYPT_H +#include <crypt.h> +#endif + +/* these macros gain us a few percent of speed on gcc */ +#if (__GNUC__ >= 3) +/* the strange !! 
is to ensure that __builtin_expect() takes either 0 or 1 + as its first argument */ +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif +#else +#ifndef likely +#define likely(x) (x) +#endif +#ifndef unlikely +#define unlikely(x) (x) +#endif +#endif + +#ifndef HAVE_FDATASYNC +#define fdatasync(fd) fsync(fd) +#elif !defined(HAVE_DECL_FDATASYNC) +int fdatasync(int ); +#endif + +/* these are used to mark symbols as local to a shared lib, or + * publicly available via the shared lib API */ +#ifndef _PUBLIC_ +#ifdef HAVE_VISIBILITY_ATTR +#define _PUBLIC_ __attribute__((visibility("default"))) +#else +#define _PUBLIC_ +#endif +#endif + +#ifndef _PRIVATE_ +#ifdef HAVE_VISIBILITY_ATTR +# define _PRIVATE_ __attribute__((visibility("hidden"))) +#else +# define _PRIVATE_ +#endif +#endif + +#ifndef HAVE_POLL +#define poll rep_poll +/* prototype is in "system/network.h" */ +#endif + +#ifndef HAVE_GETPEEREID +#define getpeereid rep_getpeereid +int rep_getpeereid(int s, uid_t *uid, gid_t *gid); +#endif + +#ifndef HAVE_USLEEP +#define usleep rep_usleep +typedef long useconds_t; +int usleep(useconds_t); +#endif + +#ifndef HAVE_SETPROCTITLE +#define setproctitle rep_setproctitle +void rep_setproctitle(const char *fmt, ...) PRINTF_ATTRIBUTE(1, 2); +#endif + +#ifndef HAVE_SETPROCTITLE_INIT +#define setproctitle_init rep_setproctitle_init +void rep_setproctitle_init(int argc, char *argv[], char *envp[]); +#endif + +#ifndef HAVE_MEMSET_S +#define memset_s rep_memset_s +int rep_memset_s(void *dest, size_t destsz, int ch, size_t count); +#endif + +#ifndef HAVE_GETPROGNAME +#define getprogname rep_getprogname +const char *rep_getprogname(void); +#endif + +#ifndef HAVE_COPY_FILE_RANGE +#define copy_file_range rep_copy_file_range +ssize_t rep_copy_file_range(int fd_in, + loff_t *off_in, + int fd_out, + loff_t *off_out, + size_t len, + unsigned int flags); +#endif /* HAVE_COPY_FILE_RANGE */ + +#ifndef FALL_THROUGH +# ifdef HAVE_FALLTHROUGH_ATTRIBUTE +# define FALL_THROUGH __attribute__ ((fallthrough)) +# else /* HAVE_FALLTHROUGH_ATTRIBUTE */ +# define FALL_THROUGH ((void)0) +# endif /* HAVE_FALLTHROUGH_ATTRIBUTE */ +#endif /* FALL_THROUGH */ + +struct __rep_cwrap_enabled_state { + const char *fnname; + bool cached; + bool retval; +}; + +static inline bool __rep_cwrap_enabled_fn(struct __rep_cwrap_enabled_state *state) +{ + bool (*__wrapper_enabled_fn)(void) = NULL; + + if (state->cached) { + return state->retval; + } + state->retval = false; + state->cached = true; + + __wrapper_enabled_fn = (bool (*)(void))dlsym(RTLD_DEFAULT, state->fnname); + if (__wrapper_enabled_fn == NULL) { + return state->retval; + } + + state->retval = __wrapper_enabled_fn(); + return state->retval; +} + +static inline bool nss_wrapper_enabled(void) +{ + struct __rep_cwrap_enabled_state state = { + .fnname = "nss_wrapper_enabled", + }; + return __rep_cwrap_enabled_fn(&state); +} +static inline bool nss_wrapper_hosts_enabled(void) +{ + struct __rep_cwrap_enabled_state state = { + .fnname = "nss_wrapper_hosts_enabled", + }; + return __rep_cwrap_enabled_fn(&state); +} +static inline bool socket_wrapper_enabled(void) +{ + struct __rep_cwrap_enabled_state state = { + .fnname = "socket_wrapper_enabled", + }; + return __rep_cwrap_enabled_fn(&state); +} +static inline bool uid_wrapper_enabled(void) +{ + struct __rep_cwrap_enabled_state state = { + .fnname = "uid_wrapper_enabled", + }; + return __rep_cwrap_enabled_fn(&state); +} + +static inline bool 
_hexcharval(char c, uint8_t *val) +{ + if ((c >= '0') && (c <= '9')) { *val = c - '0'; return true; } + if ((c >= 'a') && (c <= 'f')) { *val = c - 'a' + 10; return true; } + if ((c >= 'A') && (c <= 'F')) { *val = c - 'A' + 10; return true; } + return false; +} + +static inline bool hex_byte(const char *in, uint8_t *out) +{ + uint8_t hi=0, lo=0; + bool ok = _hexcharval(in[0], &hi) && _hexcharval(in[1], &lo); + *out = (hi<<4)+lo; + return ok; +} + +/* Needed for Solaris atomic_add_XX functions. */ +#if defined(HAVE_SYS_ATOMIC_H) +#include <sys/atomic.h> +#endif + +/* + * This handles the case of missing pthread support and ensures code can use + * __thread unconditionally, such that when built on a platform without pthread + * support, the __thread qualifier is an empty define. + */ +#ifndef HAVE___THREAD +# ifdef HAVE_PTHREAD +# error Configure failed to detect pthread library with missing TLS support +# endif +#define __thread /* expand the qualifier to nothing, as promised above */ +#define HAVE___THREAD +#endif + +#endif /* _LIBREPLACE_REPLACE_H */ diff --git a/lib/replace/snprintf.c b/lib/replace/snprintf.c new file mode 100644 index 0000000..dd878fc --- /dev/null +++ b/lib/replace/snprintf.c @@ -0,0 +1,1536 @@ +/* + * NOTE: If you change this file, please merge it into rsync, samba, etc. + */ + +/* + * Copyright Patrick Powell 1995 + * This code is based on code written by Patrick Powell (papowell@astart.com) + * It may be used for any purpose as long as this notice remains intact + * on all source code distributions + */ + +/************************************************************** + * Original: + * Patrick Powell Tue Apr 11 09:48:21 PDT 1995 + * A bombproof version of doprnt (dopr) included. + * Sigh. This sort of thing is always nasty to deal with. Note that + * the version here does not include floating point... + * + * snprintf() is used instead of sprintf() as it does limit checks + * for string length. This covers a nasty loophole. + * + * The other functions are there to prevent NULL pointers from + * causing nasty effects. + * + * More Recently: + * Brandon Long <blong@fiction.net> 9/15/96 for mutt 0.43 + * This was ugly. It is still ugly. I opted out of floating point + * numbers, but the formatter understands just about everything + * from the normal C string format, at least as far as I can tell from + * the Solaris 2.5 printf(3S) man page. + * + * Brandon Long <blong@fiction.net> 10/22/97 for mutt 0.87.1 + * Ok, added some minimal floating point support, which means this + * probably requires libm on most operating systems. Don't yet + * support the exponent (e,E) and sigfig (g,G). Also, fmtint() + * was pretty badly broken, it just wasn't being exercised in ways + * which showed it, so that's been fixed. Also, formatted the code + * to mutt conventions, and removed dead code left over from the + * original. Also, there is now a builtin-test, just compile with: + * gcc -DTEST_SNPRINTF -o snprintf snprintf.c -lm + * and run snprintf for results. + * + * Thomas Roessler <roessler@guug.de> 01/27/98 for mutt 0.89i + * The PGP code was using unsigned hexadecimal formats. + * Unfortunately, unsigned formats simply didn't work. + * + * Michael Elkins <me@cs.hmc.edu> 03/05/98 for mutt 0.90.8 + * The original code assumed that both snprintf() and vsnprintf() were + * missing. Some systems only have snprintf() but not vsnprintf(), so + * the code is now broken down under HAVE_SNPRINTF and HAVE_VSNPRINTF.
+ * + * Andrew Tridgell (tridge@samba.org) Oct 1998 + * fixed handling of %.0f + * added test for HAVE_LONG_DOUBLE + * + * tridge@samba.org, idra@samba.org, April 2001 + * got rid of fcvt code (twas buggy and made testing harder) + * added C99 semantics + * + * date: 2002/12/19 19:56:31; author: herb; state: Exp; lines: +2 -0 + * actually print args for %g and %e + * + * date: 2002/06/03 13:37:52; author: jmcd; state: Exp; lines: +8 -0 + * Since includes.h isn't included here, VA_COPY has to be defined here. I don't + * see any include file that is guaranteed to be here, so I'm defining it + * locally. Fixes AIX and Solaris builds. + * + * date: 2002/06/03 03:07:24; author: tridge; state: Exp; lines: +5 -13 + * put the ifdef for HAVE_VA_COPY in one place rather than in lots of + * functions + * + * date: 2002/05/17 14:51:22; author: jmcd; state: Exp; lines: +21 -4 + * Fix usage of va_list passed as an arg. Use __va_copy before using it + * when it exists. + * + * date: 2002/04/16 22:38:04; author: idra; state: Exp; lines: +20 -14 + * Fix incorrect zpadlen handling in fmtfp. + * Thanks to Ollie Oldham <ollie.oldham@metro-optix.com> for spotting it. + * few mods to make it easier to compile the tests. + * added the "Ollie" test to the floating point ones. + * + * Martin Pool (mbp@samba.org) April 2003 + * Remove NO_CONFIG_H so that the test case can be built within a source + * tree with less trouble. + * Remove unnecessary SAFE_FREE() definition. + * + * Martin Pool (mbp@samba.org) May 2003 + * Put in a prototype for dummy_snprintf() to quiet compiler warnings. + * + * Move #endif to make sure VA_COPY, LDOUBLE, etc are defined even + * if the C library has some snprintf functions already. + * + * Darren Tucker (dtucker@zip.com.au) 2005 + * Fix bug allowing read overruns of the source string with "%.*s" + * Usually harmless unless the read runs outside the process' allocation + * (eg if your malloc does guard pages) in which case it will segfault. + * From OpenSSH. Also added test for same. + * + * Simo Sorce (idra@samba.org) Jan 2006 + * + * Add support for position independent parameters + * fix fmtstr now it conforms to sprintf wrt min.max + * + **************************************************************/ + +#include "replace.h" +#include "system/locale.h" + +#ifdef TEST_SNPRINTF /* need math library headers for testing */ + +/* In test mode, we pretend that this system doesn't have any snprintf + * functions, regardless of what config.h says. */ +# undef HAVE_SNPRINTF +# undef HAVE_VSNPRINTF +# undef HAVE_C99_VSNPRINTF +# undef HAVE_ASPRINTF +# undef HAVE_VASPRINTF +# include <math.h> +#endif /* TEST_SNPRINTF */ + +#if defined(HAVE_SNPRINTF) && defined(HAVE_VSNPRINTF) && defined(HAVE_C99_VSNPRINTF) +/* only include stdio.h if we are not re-defining snprintf or vsnprintf */ +#include <stdio.h> + /* make the compiler happy with an empty file */ + void dummy_snprintf(void); + void dummy_snprintf(void) {} +#endif /* HAVE_SNPRINTF, etc */ + +/* yes this really must be a ||. 
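We need the replacement when vsnprintf() is either missing entirely or present but not C99-conformant, as detected by configure.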
Don't muck with this (tridge) */ +#if !defined(HAVE_VSNPRINTF) || !defined(HAVE_C99_VSNPRINTF) + +#ifdef HAVE_LONG_DOUBLE +#define LDOUBLE long double +#else +#define LDOUBLE double +#endif + +#ifdef HAVE_LONG_LONG +#define LLONG long long +#else +#define LLONG long +#endif + +#ifndef VA_COPY +#ifdef HAVE_VA_COPY +#define VA_COPY(dest, src) va_copy(dest, src) +#else +#ifdef HAVE___VA_COPY +#define VA_COPY(dest, src) __va_copy(dest, src) +#else +#define VA_COPY(dest, src) (dest) = (src) +#endif +#endif + +/* + * dopr(): poor man's version of doprintf + */ + +/* format read states */ +#define DP_S_DEFAULT 0 +#define DP_S_FLAGS 1 +#define DP_S_MIN 2 +#define DP_S_DOT 3 +#define DP_S_MAX 4 +#define DP_S_MOD 5 +#define DP_S_CONV 6 +#define DP_S_DONE 7 + +/* format flags - Bits */ +#define DP_F_MINUS (1 << 0) +#define DP_F_PLUS (1 << 1) +#define DP_F_SPACE (1 << 2) +#define DP_F_NUM (1 << 3) +#define DP_F_ZERO (1 << 4) +#define DP_F_UP (1 << 5) +#define DP_F_UNSIGNED (1 << 6) + +/* Conversion Flags */ +#define DP_C_CHAR 1 +#define DP_C_SHORT 2 +#define DP_C_LONG 3 +#define DP_C_LDOUBLE 4 +#define DP_C_LLONG 5 +#define DP_C_SIZET 6 + +/* Chunk types */ +#define CNK_FMT_STR 0 +#define CNK_INT 1 +#define CNK_OCTAL 2 +#define CNK_UINT 3 +#define CNK_HEX 4 +#define CNK_FLOAT 5 +#define CNK_CHAR 6 +#define CNK_STRING 7 +#define CNK_PTR 8 +#define CNK_NUM 9 +#define CNK_PRCNT 10 + +#define char_to_int(p) ((p)- '0') +#ifndef MAX +#define MAX(p,q) (((p) >= (q)) ? (p) : (q)) +#endif + +struct pr_chunk { + int type; /* chunk type */ + int num; /* parameter number */ + int min; + int max; + int flags; + int cflags; + int start; + int len; + LLONG value; + LDOUBLE fvalue; + char *strvalue; + void *pnum; + struct pr_chunk *min_star; + struct pr_chunk *max_star; + struct pr_chunk *next; +}; + +struct pr_chunk_x { + struct pr_chunk **chunks; + int num; +}; + +static int dopr(char *buffer, size_t maxlen, const char *format, + va_list args_in); +static void fmtstr(char *buffer, size_t *currlen, size_t maxlen, + char *value, int flags, int min, int max); +static void fmtint(char *buffer, size_t *currlen, size_t maxlen, + LLONG value, int base, int min, int max, int flags); +static void fmtfp(char *buffer, size_t *currlen, size_t maxlen, + LDOUBLE fvalue, int min, int max, int flags); +static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c); +static struct pr_chunk *new_chunk(void); +static int add_cnk_list_entry(struct pr_chunk_x **list, + int max_num, struct pr_chunk *chunk); + +static int dopr(char *buffer, size_t maxlen, const char *format, va_list args_in) +{ + char ch; + int state; + int pflag; + int pnum; + int pfirst; + size_t currlen; + va_list args; + const char *base; + struct pr_chunk *chunks = NULL; + struct pr_chunk *cnk = NULL; + struct pr_chunk_x *clist = NULL; + int max_pos; + int ret = -1; + + VA_COPY(args, args_in); + + state = DP_S_DEFAULT; + pfirst = 1; + pflag = 0; + pnum = 0; + + max_pos = 0; + base = format; + ch = *format++; + + /* retrieve the string structure as chunks */ + while (state != DP_S_DONE) { + if (ch == '\0') + state = DP_S_DONE; + + switch(state) { + case DP_S_DEFAULT: + + if (cnk) { + cnk->next = new_chunk(); + cnk = cnk->next; + } else { + cnk = new_chunk(); + } + if (!cnk) goto done; + if (!chunks) chunks = cnk; + + if (ch == '%') { + state = DP_S_FLAGS; + ch = *format++; + } else { + cnk->type = CNK_FMT_STR; + cnk->start = format - base -1; + while ((ch != '\0') && (ch != '%')) ch = *format++; + cnk->len = format - base - cnk->start -1; + } + break; + 
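/* Each %-directive is scanned through the DP_S_* states (flags, width, dot, precision, length modifier, conversion) and recorded as a pr_chunk; argument values are fetched afterwards in a second pass so that positional %n$ parameters work. */ +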
case DP_S_FLAGS: + switch (ch) { + case '-': + cnk->flags |= DP_F_MINUS; + ch = *format++; + break; + case '+': + cnk->flags |= DP_F_PLUS; + ch = *format++; + break; + case ' ': + cnk->flags |= DP_F_SPACE; + ch = *format++; + break; + case '#': + cnk->flags |= DP_F_NUM; + ch = *format++; + break; + case '0': + cnk->flags |= DP_F_ZERO; + ch = *format++; + break; + case 'I': + /* internationalization not supported yet */ + ch = *format++; + break; + default: + state = DP_S_MIN; + break; + } + break; + case DP_S_MIN: + if (isdigit((unsigned char)ch)) { + cnk->min = 10 * cnk->min + char_to_int (ch); + ch = *format++; + } else if (ch == '$') { + if (!pfirst && !pflag) { + /* parameters must be all positioned or none */ + goto done; + } + if (pfirst) { + pfirst = 0; + pflag = 1; + } + if (cnk->min == 0) /* what ?? */ + goto done; + cnk->num = cnk->min; + cnk->min = 0; + ch = *format++; + } else if (ch == '*') { + if (pfirst) pfirst = 0; + cnk->min_star = new_chunk(); + if (!cnk->min_star) /* out of memory :-( */ + goto done; + cnk->min_star->type = CNK_INT; + if (pflag) { + int num; + ch = *format++; + if (!isdigit((unsigned char)ch)) { + /* parameters must be all positioned or none */ + goto done; + } + for (num = 0; isdigit((unsigned char)ch); ch = *format++) { + num = 10 * num + char_to_int(ch); + } + cnk->min_star->num = num; + if (ch != '$') /* what ?? */ + goto done; + } else { + cnk->min_star->num = ++pnum; + } + max_pos = add_cnk_list_entry(&clist, max_pos, cnk->min_star); + if (max_pos == 0) /* out of memory :-( */ + goto done; + ch = *format++; + state = DP_S_DOT; + } else { + if (pfirst) pfirst = 0; + state = DP_S_DOT; + } + break; + case DP_S_DOT: + if (ch == '.') { + state = DP_S_MAX; + ch = *format++; + } else { + state = DP_S_MOD; + } + break; + case DP_S_MAX: + if (isdigit((unsigned char)ch)) { + if (cnk->max < 0) + cnk->max = 0; + cnk->max = 10 * cnk->max + char_to_int (ch); + ch = *format++; + } else if (ch == '$') { + if (!pfirst && !pflag) { + /* parameters must be all positioned or none */ + goto done; + } + if (cnk->max <= 0) /* what ?? */ + goto done; + cnk->num = cnk->max; + cnk->max = -1; + ch = *format++; + } else if (ch == '*') { + cnk->max_star = new_chunk(); + if (!cnk->max_star) /* out of memory :-( */ + goto done; + cnk->max_star->type = CNK_INT; + if (pflag) { + int num; + ch = *format++; + if (!isdigit((unsigned char)ch)) { + /* parameters must be all positioned or none */ + goto done; + } + for (num = 0; isdigit((unsigned char)ch); ch = *format++) { + num = 10 * num + char_to_int(ch); + } + cnk->max_star->num = num; + if (ch != '$') /* what ?? 
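a positional '*' precision must end with '$', as in "%1$.*2$f"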
*/ + goto done; + } else { + cnk->max_star->num = ++pnum; + } + max_pos = add_cnk_list_entry(&clist, max_pos, cnk->max_star); + if (max_pos == 0) /* out of memory :-( */ + goto done; + + ch = *format++; + state = DP_S_MOD; + } else { + state = DP_S_MOD; + } + break; + case DP_S_MOD: + switch (ch) { + case 'h': + cnk->cflags = DP_C_SHORT; + ch = *format++; + if (ch == 'h') { + cnk->cflags = DP_C_CHAR; + ch = *format++; + } + break; + case 'l': + cnk->cflags = DP_C_LONG; + ch = *format++; + if (ch == 'l') { /* It's a long long */ + cnk->cflags = DP_C_LLONG; + ch = *format++; + } + break; + case 'j': + cnk->cflags = DP_C_LLONG; + ch = *format++; + break; + case 'L': + cnk->cflags = DP_C_LDOUBLE; + ch = *format++; + break; + case 'z': + cnk->cflags = DP_C_SIZET; + ch = *format++; + break; + default: + break; + } + state = DP_S_CONV; + break; + case DP_S_CONV: + if (cnk->num == 0) cnk->num = ++pnum; + max_pos = add_cnk_list_entry(&clist, max_pos, cnk); + if (max_pos == 0) /* out of memory :-( */ + goto done; + + switch (ch) { + case 'd': + case 'i': + cnk->type = CNK_INT; + break; + case 'o': + cnk->type = CNK_OCTAL; + cnk->flags |= DP_F_UNSIGNED; + break; + case 'u': + cnk->type = CNK_UINT; + cnk->flags |= DP_F_UNSIGNED; + break; + case 'X': + cnk->flags |= DP_F_UP; + case 'x': + cnk->type = CNK_HEX; + cnk->flags |= DP_F_UNSIGNED; + break; + case 'A': + /* hex float not supported yet */ + case 'E': + case 'G': + case 'F': + cnk->flags |= DP_F_UP; + case 'a': + /* hex float not supported yet */ + case 'e': + case 'f': + case 'g': + cnk->type = CNK_FLOAT; + break; + case 'c': + cnk->type = CNK_CHAR; + break; + case 's': + cnk->type = CNK_STRING; + break; + case 'p': + cnk->type = CNK_PTR; + cnk->flags |= DP_F_UNSIGNED; + break; + case 'n': + cnk->type = CNK_NUM; + break; + case '%': + cnk->type = CNK_PRCNT; + break; + default: + /* Unknown, bail out*/ + goto done; + } + ch = *format++; + state = DP_S_DEFAULT; + break; + case DP_S_DONE: + break; + default: + /* hmm? */ + break; /* some picky compilers need this */ + } + } + + /* retrieve the format arguments */ + for (pnum = 0; pnum < max_pos; pnum++) { + int i; + + if (clist[pnum].num == 0) { + /* ignoring a parameter should not be permitted + * all parameters must be matched at least once + * BUT seem some system ignore this rule ... + * at least my glibc based system does --SSS + */ +#ifdef DEBUG_SNPRINTF + printf("parameter at position %d not used\n", pnum+1); +#endif + /* eat the parameter */ + va_arg (args, int); + continue; + } + for (i = 1; i < clist[pnum].num; i++) { + if (clist[pnum].chunks[0]->type != clist[pnum].chunks[i]->type) { + /* nooo no no! 
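(e.g. "%1$d %1$s" must fail here)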
+ * all the references to a parameter + * must be of the same type + */ + goto done; + } + } + cnk = clist[pnum].chunks[0]; + switch (cnk->type) { + case CNK_INT: + if (cnk->cflags == DP_C_SHORT) + cnk->value = va_arg (args, int); + else if (cnk->cflags == DP_C_LONG) + cnk->value = va_arg (args, long int); + else if (cnk->cflags == DP_C_LLONG) + cnk->value = va_arg (args, LLONG); + else if (cnk->cflags == DP_C_SIZET) + cnk->value = va_arg (args, ssize_t); + else + cnk->value = va_arg (args, int); + + for (i = 1; i < clist[pnum].num; i++) { + clist[pnum].chunks[i]->value = cnk->value; + } + break; + + case CNK_OCTAL: + case CNK_UINT: + case CNK_HEX: + if (cnk->cflags == DP_C_SHORT) + cnk->value = va_arg (args, unsigned int); + else if (cnk->cflags == DP_C_LONG) + cnk->value = (unsigned long int)va_arg (args, unsigned long int); + else if (cnk->cflags == DP_C_LLONG) + cnk->value = (LLONG)va_arg (args, unsigned LLONG); + else if (cnk->cflags == DP_C_SIZET) + cnk->value = (size_t)va_arg (args, size_t); + else + cnk->value = (unsigned int)va_arg (args, unsigned int); + + for (i = 1; i < clist[pnum].num; i++) { + clist[pnum].chunks[i]->value = cnk->value; + } + break; + + case CNK_FLOAT: + if (cnk->cflags == DP_C_LDOUBLE) + cnk->fvalue = va_arg (args, LDOUBLE); + else + cnk->fvalue = va_arg (args, double); + + for (i = 1; i < clist[pnum].num; i++) { + clist[pnum].chunks[i]->fvalue = cnk->fvalue; + } + break; + + case CNK_CHAR: + cnk->value = va_arg (args, int); + + for (i = 1; i < clist[pnum].num; i++) { + clist[pnum].chunks[i]->value = cnk->value; + } + break; + + case CNK_STRING: + cnk->strvalue = va_arg (args, char *); + if (!cnk->strvalue) cnk->strvalue = "(NULL)"; + + for (i = 1; i < clist[pnum].num; i++) { + clist[pnum].chunks[i]->strvalue = cnk->strvalue; + } + break; + + case CNK_PTR: + cnk->strvalue = va_arg (args, void *); + for (i = 1; i < clist[pnum].num; i++) { + clist[pnum].chunks[i]->strvalue = cnk->strvalue; + } + break; + + case CNK_NUM: + if (cnk->cflags == DP_C_CHAR) + cnk->pnum = va_arg (args, char *); + else if (cnk->cflags == DP_C_SHORT) + cnk->pnum = va_arg (args, short int *); + else if (cnk->cflags == DP_C_LONG) + cnk->pnum = va_arg (args, long int *); + else if (cnk->cflags == DP_C_LLONG) + cnk->pnum = va_arg (args, LLONG *); + else if (cnk->cflags == DP_C_SIZET) + cnk->pnum = va_arg (args, ssize_t *); + else + cnk->pnum = va_arg (args, int *); + + for (i = 1; i < clist[pnum].num; i++) { + clist[pnum].chunks[i]->pnum = cnk->pnum; + } + break; + + case CNK_PRCNT: + break; + + default: + /* what ?? 
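no chunk type other than the CNK_* values can come out of the parser, so treat this as an internal error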
*/ + goto done; + } + } + /* print out the actual string from chunks */ + currlen = 0; + cnk = chunks; + while (cnk) { + int len, min, max; + + if (cnk->min_star) min = cnk->min_star->value; + else min = cnk->min; + if (cnk->max_star) max = cnk->max_star->value; + else max = cnk->max; + + switch (cnk->type) { + + case CNK_FMT_STR: + if (maxlen != 0 && maxlen > currlen) { + if (maxlen > (currlen + cnk->len)) len = cnk->len; + else len = maxlen - currlen; + + memcpy(&(buffer[currlen]), &(base[cnk->start]), len); + } + currlen += cnk->len; + + break; + + case CNK_INT: + case CNK_UINT: + fmtint (buffer, &currlen, maxlen, cnk->value, 10, min, max, cnk->flags); + break; + + case CNK_OCTAL: + fmtint (buffer, &currlen, maxlen, cnk->value, 8, min, max, cnk->flags); + break; + + case CNK_HEX: + fmtint (buffer, &currlen, maxlen, cnk->value, 16, min, max, cnk->flags); + break; + + case CNK_FLOAT: + fmtfp (buffer, &currlen, maxlen, cnk->fvalue, min, max, cnk->flags); + break; + + case CNK_CHAR: + dopr_outch (buffer, &currlen, maxlen, cnk->value); + break; + + case CNK_STRING: + if (max == -1) { + max = strlen(cnk->strvalue); + } + fmtstr (buffer, &currlen, maxlen, cnk->strvalue, cnk->flags, min, max); + break; + + case CNK_PTR: + fmtint (buffer, &currlen, maxlen, (long)(cnk->strvalue), 16, min, max, cnk->flags); + break; + + case CNK_NUM: + if (cnk->cflags == DP_C_CHAR) + *((char *)(cnk->pnum)) = (char)currlen; + else if (cnk->cflags == DP_C_SHORT) + *((short int *)(cnk->pnum)) = (short int)currlen; + else if (cnk->cflags == DP_C_LONG) + *((long int *)(cnk->pnum)) = (long int)currlen; + else if (cnk->cflags == DP_C_LLONG) + *((LLONG *)(cnk->pnum)) = (LLONG)currlen; + else if (cnk->cflags == DP_C_SIZET) + *((ssize_t *)(cnk->pnum)) = (ssize_t)currlen; + else + *((int *)(cnk->pnum)) = (int)currlen; + break; + + case CNK_PRCNT: + dopr_outch (buffer, &currlen, maxlen, '%'); + break; + + default: + /* what ?? 
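same internal-error guard as in the argument-retrieval loop above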
*/ + goto done; + } + cnk = cnk->next; + } + if (maxlen != 0) { + if (currlen < maxlen - 1) + buffer[currlen] = '\0'; + else if (maxlen > 0) + buffer[maxlen - 1] = '\0'; + } + ret = currlen; + +done: + va_end(args); + + while (chunks) { + cnk = chunks->next; + if (chunks->min_star) free(chunks->min_star); + if (chunks->max_star) free(chunks->max_star); + free(chunks); + chunks = cnk; + } + if (clist) { + for (pnum = 0; pnum < max_pos; pnum++) { + if (clist[pnum].chunks) free(clist[pnum].chunks); + } + free(clist); + } + return ret; +} + +static void fmtstr(char *buffer, size_t *currlen, size_t maxlen, + char *value, int flags, int min, int max) +{ + int padlen, strln; /* amount to pad */ + int cnt = 0; + +#ifdef DEBUG_SNPRINTF + printf("fmtstr min=%d max=%d s=[%s]\n", min, max, value); +#endif + if (value == 0) { + value = "<NULL>"; + } + + for (strln = 0; strln < max && value[strln]; ++strln); /* strlen */ + padlen = min - strln; + if (padlen < 0) + padlen = 0; + if (flags & DP_F_MINUS) + padlen = -padlen; /* Left Justify */ + + while (padlen > 0) { + dopr_outch (buffer, currlen, maxlen, ' '); + --padlen; + } + while (*value && (cnt < max)) { + dopr_outch (buffer, currlen, maxlen, *value++); + ++cnt; + } + while (padlen < 0) { + dopr_outch (buffer, currlen, maxlen, ' '); + ++padlen; + } +} + +/* Have to handle DP_F_NUM (ie 0x and 0 alternates) */ + +static void fmtint(char *buffer, size_t *currlen, size_t maxlen, + LLONG value, int base, int min, int max, int flags) +{ + int signvalue = 0; + unsigned LLONG uvalue; + char convert[22+1]; /* 64-bit value in octal: 22 digits + \0 */ + int place = 0; + int spadlen = 0; /* amount to space pad */ + int zpadlen = 0; /* amount to zero pad */ + int caps = 0; + + if (max < 0) + max = 0; + + uvalue = value; + + if(!(flags & DP_F_UNSIGNED)) { + if( value < 0 ) { + signvalue = '-'; + uvalue = -value; + } else { + if (flags & DP_F_PLUS) /* Do a sign (+/i) */ + signvalue = '+'; + else if (flags & DP_F_SPACE) + signvalue = ' '; + } + } + + if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */ + + do { + convert[place++] = + (caps? "0123456789ABCDEF":"0123456789abcdef") + [uvalue % (unsigned)base ]; + uvalue = (uvalue / (unsigned)base ); + } while(uvalue && (place < sizeof(convert))); + if (place == sizeof(convert)) place--; + convert[place] = 0; + + zpadlen = max - place; + spadlen = min - MAX (max, place) - (signvalue ? 
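/* leave one column for the sign */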
1 : 0); + if (zpadlen < 0) zpadlen = 0; + if (spadlen < 0) spadlen = 0; + if (flags & DP_F_ZERO) { + zpadlen = MAX(zpadlen, spadlen); + spadlen = 0; + } + if (flags & DP_F_MINUS) + spadlen = -spadlen; /* Left Justify */ + +#ifdef DEBUG_SNPRINTF + printf("zpad: %d, spad: %d, min: %d, max: %d, place: %d\n", + zpadlen, spadlen, min, max, place); +#endif + + /* Spaces */ + while (spadlen > 0) { + dopr_outch (buffer, currlen, maxlen, ' '); + --spadlen; + } + + /* Sign */ + if (signvalue) + dopr_outch (buffer, currlen, maxlen, signvalue); + + /* Zeros */ + if (zpadlen > 0) { + while (zpadlen > 0) { + dopr_outch (buffer, currlen, maxlen, '0'); + --zpadlen; + } + } + + /* Digits */ + while (place > 0) + dopr_outch (buffer, currlen, maxlen, convert[--place]); + + /* Left Justified spaces */ + while (spadlen < 0) { + dopr_outch (buffer, currlen, maxlen, ' '); + ++spadlen; + } +} + +static LDOUBLE abs_val(LDOUBLE value) +{ + LDOUBLE result = value; + + if (value < 0) + result = -value; + + return result; +} + +static LDOUBLE POW10(int exp) +{ + LDOUBLE result = 1; + + while (exp) { + result *= 10; + exp--; + } + + return result; +} + +static LLONG ROUND(LDOUBLE value) +{ + LLONG intpart; + + intpart = (LLONG)value; + value = value - intpart; + if (value >= 0.5) intpart++; + + return intpart; +} + +/* a replacement for modf that doesn't need the math library. Should + be portable, but slow */ +static double my_modf(double x0, double *iptr) +{ + int i; + LLONG l=0; + double x = x0; + double f = 1.0; + + for (i=0;i<100;i++) { + l = (long)x; + if (l <= (x+1) && l >= (x-1)) break; + x *= 0.1; + f *= 10.0; + } + + if (i == 100) { + /* yikes! the number is beyond what we can handle. What do we do? */ + (*iptr) = 0; + return 0; + } + + if (i != 0) { + double i2; + double ret; + + ret = my_modf(x0-l*f, &i2); + (*iptr) = l*f + i2; + return ret; + } + + (*iptr) = l; + return x - (*iptr); +} + + +static void fmtfp (char *buffer, size_t *currlen, size_t maxlen, + LDOUBLE fvalue, int min, int max, int flags) +{ + int signvalue = 0; + double ufvalue; + char iconvert[311]; + char fconvert[311]; + int iplace = 0; + int fplace = 0; + int padlen = 0; /* amount to pad */ + int zpadlen = 0; + int caps = 0; + int idx; + double intpart; + double fracpart; + double temp; + + /* + * AIX manpage says the default is 0, but Solaris says the default + * is 6, and sprintf on AIX defaults to 6 + */ + if (max < 0) + max = 6; + + ufvalue = abs_val (fvalue); + + if (fvalue < 0) { + signvalue = '-'; + } else { + if (flags & DP_F_PLUS) { /* Do a sign (+/-) */ + signvalue = '+'; + } else { + if (flags & DP_F_SPACE) + signvalue = ' '; + } + } + +#if 0 + if (flags & DP_F_UP) caps = 1; /* Should characters be upper case? */ +#endif + +#if 0 + if (max == 0) ufvalue += 0.5; /* if max = 0 we must round */ +#endif + + /* + * Sorry, we only support 9 digits past the decimal because of our + * conversion method + */ + if (max > 9) + max = 9; + + /* We "cheat" by converting the fractional part to integer by + * multiplying by a factor of 10 + */ + + temp = ufvalue; + my_modf(temp, &intpart); + + fracpart = ROUND((POW10(max)) * (ufvalue - intpart)); + + if (fracpart >= POW10(max)) { + intpart++; + fracpart -= POW10(max); + } + + + /* Convert integer part */ + do { + temp = intpart*0.1; + my_modf(temp, &intpart); + idx = (int) ((temp -intpart +0.05)* 10.0); + /* idx = (int) (((double)(temp*0.1) -intpart +0.05) *10.0); */ + /* printf ("%llf, %f, %x\n", temp, intpart, idx); */ + iconvert[iplace++] = + (caps?
"0123456789ABCDEF":"0123456789abcdef")[idx]; + } while (intpart && (iplace < 311)); + if (iplace == 311) iplace--; + iconvert[iplace] = 0; + + /* Convert fractional part */ + if (fracpart) + { + do { + temp = fracpart*0.1; + my_modf(temp, &fracpart); + idx = (int) ((temp -fracpart +0.05)* 10.0); + /* idx = (int) ((((temp/10) -fracpart) +0.05) *10); */ + /* printf ("%lf, %lf, %ld\n", temp, fracpart, idx ); */ + fconvert[fplace++] = + (caps? "0123456789ABCDEF":"0123456789abcdef")[idx]; + } while(fracpart && (fplace < 311)); + if (fplace == 311) fplace--; + } + fconvert[fplace] = 0; + + /* -1 for decimal point, another -1 if we are printing a sign */ + padlen = min - iplace - max - 1 - ((signvalue) ? 1 : 0); + zpadlen = max - fplace; + if (zpadlen < 0) zpadlen = 0; + if (padlen < 0) + padlen = 0; + if (flags & DP_F_MINUS) + padlen = -padlen; /* Left Justify */ + + if ((flags & DP_F_ZERO) && (padlen > 0)) { + if (signvalue) { + dopr_outch (buffer, currlen, maxlen, signvalue); + --padlen; + signvalue = 0; + } + while (padlen > 0) { + dopr_outch (buffer, currlen, maxlen, '0'); + --padlen; + } + } + while (padlen > 0) { + dopr_outch (buffer, currlen, maxlen, ' '); + --padlen; + } + if (signvalue) + dopr_outch (buffer, currlen, maxlen, signvalue); + + while (iplace > 0) + dopr_outch (buffer, currlen, maxlen, iconvert[--iplace]); + +#ifdef DEBUG_SNPRINTF + printf("fmtfp: fplace=%d zpadlen=%d\n", fplace, zpadlen); +#endif + + /* + * Decimal point. This should probably use locale to find the correct + * char to print out. + */ + if (max > 0) { + dopr_outch (buffer, currlen, maxlen, '.'); + + while (zpadlen > 0) { + dopr_outch (buffer, currlen, maxlen, '0'); + --zpadlen; + } + + while (fplace > 0) + dopr_outch (buffer, currlen, maxlen, fconvert[--fplace]); + } + + while (padlen < 0) { + dopr_outch (buffer, currlen, maxlen, ' '); + ++padlen; + } +} + +static void dopr_outch(char *buffer, size_t *currlen, size_t maxlen, char c) +{ + if (*currlen < maxlen) { + buffer[(*currlen)] = c; + } + (*currlen)++; +} + +static struct pr_chunk *new_chunk(void) { + struct pr_chunk *new_c = (struct pr_chunk *)malloc(sizeof(struct pr_chunk)); + + if (!new_c) + return NULL; + + new_c->type = 0; + new_c->num = 0; + new_c->min = 0; + new_c->min_star = NULL; + new_c->max = -1; + new_c->max_star = NULL; + new_c->flags = 0; + new_c->cflags = 0; + new_c->start = 0; + new_c->len = 0; + new_c->value = 0; + new_c->fvalue = 0; + new_c->strvalue = NULL; + new_c->pnum = NULL; + new_c->next = NULL; + + return new_c; +} + +static int add_cnk_list_entry(struct pr_chunk_x **list, + int max_num, struct pr_chunk *chunk) { + struct pr_chunk_x *l; + struct pr_chunk **c; + int max; + int cnum; + int i, pos; + + if (chunk->num > max_num) { + max = chunk->num; + + if (*list == NULL) { + l = (struct pr_chunk_x *)malloc(sizeof(struct pr_chunk_x) * max); + pos = 0; + } else { + l = (struct pr_chunk_x *)realloc(*list, sizeof(struct pr_chunk_x) * max); + pos = max_num; + } + if (l == NULL) { + /* out of memory: free what was collected so far (the old array only has max_num entries, and may not exist at all) and clear *list so the caller's cleanup never touches it again */ + if (*list) { + for (i = 0; i < max_num; i++) { + if ((*list)[i].chunks) free((*list)[i].chunks); + } + free(*list); + *list = NULL; + } + return 0; + } + /* publish the possibly moved array immediately so *list can never dangle if a later step fails */ + *list = l; + for (i = pos; i < max; i++) { + l[i].chunks = NULL; + l[i].num = 0; + } + } else { + l = *list; + max = max_num; + } + + i = chunk->num - 1; + cnum = l[i].num + 1; + if (l[i].chunks == NULL) { + c = (struct pr_chunk **)malloc(sizeof(struct pr_chunk *) * cnum); + } else { + c = (struct pr_chunk **)realloc(l[i].chunks, sizeof(struct pr_chunk *) * cnum); + } + if (c == NULL) { + for (i = 0; i < max; i++) { + if (l[i].chunks) free(l[i].chunks); + } + free(l); + *list = NULL; + return 0; +
} + c[l[i].num] = chunk; + l[i].chunks = c; + l[i].num = cnum; + + *list = l; + return max; +} + + int rep_vsnprintf (char *str, size_t count, const char *fmt, va_list args) +{ + return dopr(str, count, fmt, args); +} +#endif + +/* yes this really must be a ||. Don't muck with this (tridge) + * + * The logic for these two is that we need our own definition if the + * OS *either* has no definition of *sprintf, or if it does have one + * that doesn't work properly according to the autoconf test. + */ +#if !defined(HAVE_SNPRINTF) || !defined(HAVE_C99_VSNPRINTF) + int rep_snprintf(char *str,size_t count,const char *fmt,...) +{ + size_t ret; + va_list ap; + + va_start(ap, fmt); + ret = vsnprintf(str, count, fmt, ap); + va_end(ap); + return ret; +} +#endif + +#ifndef HAVE_C99_VSNPRINTF + int rep_printf(const char *fmt, ...) +{ + va_list ap; + int ret; + char *s; + + s = NULL; + va_start(ap, fmt); + ret = vasprintf(&s, fmt, ap); + va_end(ap); + + if (s) { + fwrite(s, 1, strlen(s), stdout); + } + free(s); + + return ret; +} +#endif + +#ifndef HAVE_C99_VSNPRINTF + int rep_fprintf(FILE *stream, const char *fmt, ...) +{ + va_list ap; + int ret; + char *s; + + s = NULL; + va_start(ap, fmt); + ret = vasprintf(&s, fmt, ap); + va_end(ap); + + if (s) { + fwrite(s, 1, strlen(s), stream); + } + free(s); + + return ret; +} +#endif + +#endif + +#if !defined(HAVE_VASPRINTF) || !defined(HAVE_C99_VSNPRINTF) + int rep_vasprintf(char **ptr, const char *format, va_list ap) +{ + int ret; + va_list ap2; + + VA_COPY(ap2, ap); + ret = vsnprintf(NULL, 0, format, ap2); + va_end(ap2); + if (ret < 0) return ret; + + (*ptr) = (char *)malloc(ret+1); + if (!*ptr) return -1; + + VA_COPY(ap2, ap); + ret = vsnprintf(*ptr, ret+1, format, ap2); + va_end(ap2); + + return ret; +} +#endif + +#if !defined(HAVE_ASPRINTF) || !defined(HAVE_C99_VSNPRINTF) + int rep_asprintf(char **ptr, const char *format, ...) 
+{ + va_list ap; + int ret; + + *ptr = NULL; + va_start(ap, format); + ret = vasprintf(ptr, format, ap); + va_end(ap); + + return ret; +} +#endif + +#ifdef TEST_SNPRINTF + + int sprintf(char *str,const char *fmt,...); + int printf(const char *fmt,...); + + int main (void) +{ + char buf1[1024]; + char buf2[1024]; + char *buf3; + char *fp_fmt[] = { + "%1.1f", + "%-1.5f", + "%1.5f", + "%123.9f", + "%10.5f", + "% 10.5f", + "%+22.9f", + "%+4.9f", + "%01.3f", + "%4f", + "%3.1f", + "%3.2f", + "%.0f", + "%f", + "%-8.8f", + "%-9.9f", + NULL + }; + double fp_nums[] = { 6442452944.1234, -1.5, 134.21, 91340.2, 341.1234, 203.9, 0.96, 0.996, + 0.9996, 1.996, 4.136, 5.030201, 0.00205, + /* END LIST */ 0}; + char *int_fmt[] = { + "%-1.5d", + "%1.5d", + "%123.9d", + "%5.5d", + "%10.5d", + "% 10.5d", + "%+22.33d", + "%01.3d", + "%4d", + "%d", + NULL + }; + long int_nums[] = { -1, 134, 91340, 341, 0203, 1234567890, 0}; + char *str_fmt[] = { + "%10.5s", + "%-10.5s", + "%5.10s", + "%-5.10s", + "%10.1s", + "%0.10s", + "%10.0s", + "%1.10s", + "%s", + "%.1s", + "%.10s", + "%10s", + NULL + }; + char *str_vals[] = {"hello", "a", "", "a longer string", NULL}; +#ifdef HAVE_LONG_LONG + char *ll_fmt[] = { + "%llu", + NULL + }; + LLONG ll_nums[] = { 134, 91340, 341, 0203, 1234567890, 128006186140000000LL, 0}; +#endif + int x, y; + int fail = 0; + int num = 0; + int l1, l2; + char *ss_fmt[] = { + "%zd", + "%zu", + NULL + }; + size_t ss_nums[] = {134, 91340, 123456789, 0203, 1234567890, 0}; + + printf ("Testing snprintf format codes against system sprintf...\n"); + + for (x = 0; fp_fmt[x] ; x++) { + for (y = 0; fp_nums[y] != 0 ; y++) { + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), fp_fmt[x], fp_nums[y]); + l2 = sprintf (buf2, fp_fmt[x], fp_nums[y]); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp (buf1, buf2) || (l1 != l2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + fp_fmt[x], l1, buf1, l2, buf2); + fail++; + } + num++; + } + } + + for (x = 0; int_fmt[x] ; x++) { + for (y = 0; int_nums[y] != 0 ; y++) { + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), int_fmt[x], int_nums[y]); + l2 = sprintf (buf2, int_fmt[x], int_nums[y]); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp (buf1, buf2) || (l1 != l2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + int_fmt[x], l1, buf1, l2, buf2); + fail++; + } + num++; + } + } + + for (x = 0; str_fmt[x] ; x++) { + for (y = 0; str_vals[y] != 0 ; y++) { + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), str_fmt[x], str_vals[y]); + l2 = sprintf (buf2, str_fmt[x], str_vals[y]); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp (buf1, buf2) || (l1 != l2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + str_fmt[x], l1, buf1, l2, buf2); + fail++; + } + num++; + } + } + +#ifdef HAVE_LONG_LONG + for (x = 0; ll_fmt[x] ; x++) { + for (y = 0; ll_nums[y] != 0 ; y++) { + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), ll_fmt[x], ll_nums[y]); + l2 = sprintf (buf2, ll_fmt[x], ll_nums[y]); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp (buf1, buf2) || (l1 != l2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + ll_fmt[x], l1, buf1, l2, buf2); + fail++; + } + num++; + } + } +#endif + +#define BUFSZ 2048 + + buf1[0] = buf2[0] = '\0'; + if ((buf3 = malloc(BUFSZ)) == NULL) { + fail++; + } else { + num++; + memset(buf3, 'a', BUFSZ); + snprintf(buf1, 
sizeof(buf1), "%.*s", 1, buf3); + buf1[1023] = '\0'; + if (strcmp(buf1, "a") != 0) { + printf("length limit buf1 '%s' expected 'a'\n", buf1); + fail++; + } + } + + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); + l2 = sprintf(buf2, "%4$*1$d %2$s %3$*1$.*1$f", 3, "pos test", 12.3456, 9); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp(buf1, buf2) || (l1 != l2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); + fail++; + } + + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); + l2 = sprintf(buf2, "%4$*4$d %2$s %3$*4$.*4$f", 3, "pos test", 12.3456, 9); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp(buf1, buf2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + "%4$*1$d %2$s %3$*1$.*1$f", l1, buf1, l2, buf2); + fail++; + } + + for (x = 0; ss_fmt[x] ; x++) { + for (y = 0; ss_nums[y] != 0 ; y++) { + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), ss_fmt[x], ss_nums[y]); + l2 = sprintf (buf2, ss_fmt[x], ss_nums[y]); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp (buf1, buf2) || (l1 != l2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + ss_fmt[x], l1, buf1, l2, buf2); + fail++; + } + num++; + } + } +#if 0 + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), "%lld", (LLONG)1234567890); + l2 = sprintf(buf2, "%lld", (LLONG)1234567890); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp(buf1, buf2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + "%lld", l1, buf1, l2, buf2); + fail++; + } + + buf1[0] = buf2[0] = '\0'; + l1 = snprintf(buf1, sizeof(buf1), "%Lf", (LDOUBLE)890.1234567890123); + l2 = sprintf(buf2, "%Lf", (LDOUBLE)890.1234567890123); + buf1[1023] = buf2[1023] = '\0'; + if (strcmp(buf1, buf2)) { + printf("snprintf doesn't match Format: %s\n\tsnprintf(%d) = [%s]\n\t sprintf(%d) = [%s]\n", + "%Lf", l1, buf1, l2, buf2); + fail++; + } +#endif + printf ("%d tests failed out of %d.\n", fail, num); + + printf("seeing how many digits we support\n"); + { + double v0 = 0.12345678901234567890123456789012345678901; + for (x=0; x<100; x++) { + double p = pow(10, x); + double r = v0*p; + snprintf(buf1, sizeof(buf1), "%1.1f", r); + sprintf(buf2, "%1.1f", r); + if (strcmp(buf1, buf2)) { + printf("we seem to support %d digits\n", x-1); + break; + } + } + } + + return 0; +} +#endif /* TEST_SNPRINTF */ diff --git a/lib/replace/socket.c b/lib/replace/socket.c new file mode 100644 index 0000000..4cd9d2e --- /dev/null +++ b/lib/replace/socket.c @@ -0,0 +1,39 @@ +/* + * Unix SMB/CIFS implementation. + * + * Dummy replacements for socket functions. + * + * Copyright (C) Michael Adam <obnox@samba.org> 2008 + * + * ** NOTE! The following LGPL license applies to the replace + * ** library. This does NOT imply that all of Samba is released + * ** under the LGPL + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "replace.h" +#include "system/network.h" + +int rep_connect(int sockfd, const struct sockaddr *serv_addr, socklen_t addrlen) +{ + errno = ENOSYS; + return -1; +} + +struct hostent *rep_gethostbyname(const char *name) +{ + errno = ENOSYS; + return NULL; +} diff --git a/lib/replace/socketpair.c b/lib/replace/socketpair.c new file mode 100644 index 0000000..c775730 --- /dev/null +++ b/lib/replace/socketpair.c @@ -0,0 +1,46 @@ +/* + * Unix SMB/CIFS implementation. + * replacement routines for broken systems + * Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2006 + * Copyright (C) Michael Adam <obnox@samba.org> 2008 + * + * ** NOTE! The following LGPL license applies to the replace + * ** library. This does NOT imply that all of Samba is released + * ** under the LGPL + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. + */ + +#include "replace.h" +#include "system/network.h" + +int rep_socketpair(int d, int type, int protocol, int sv[2]) +{ + if (d != AF_UNIX) { + errno = EAFNOSUPPORT; + return -1; + } + + if (protocol != 0) { + errno = EPROTONOSUPPORT; + return -1; + } + + if (type != SOCK_STREAM) { + errno = EOPNOTSUPP; + return -1; + } + + return pipe(sv); +} diff --git a/lib/replace/strptime.c b/lib/replace/strptime.c new file mode 100644 index 0000000..56ea1e0 --- /dev/null +++ b/lib/replace/strptime.c @@ -0,0 +1,995 @@ +/* Convert a string representation of time to a time value. + Copyright (C) 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc. + This file is part of the GNU C Library. + Contributed by Ulrich Drepper <drepper@cygnus.com>, 1996. + + The GNU C Library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public License as + published by the Free Software Foundation; either version 3 of the + License, or (at your option) any later version. + + The GNU C Library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Library General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with the GNU C Library; see the file COPYING.LIB. If not, + see <http://www.gnu.org/licenses/>. */ + +/* XXX This version of the implementation is not really complete. + Some of the fields cannot add information alone. But if seeing + some of them in the same format (such as year, week and weekday) + this is enough information for determining the date. 
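This implementation does not attempt that cross-field reconstruction for the week-based fields; the %U/%V/%W cases below note that they cannot set any tm field on their own.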
*/ + +#include "replace.h" +#include "system/locale.h" +#include "system/time.h" + +#ifndef __P +# if defined (__GNUC__) || (defined (__STDC__) && __STDC__) +# define __P(args) args +# else +# define __P(args) () +# endif /* GCC. */ +#endif /* Not __P. */ + +#if ! HAVE_LOCALTIME_R && ! defined localtime_r +# ifdef _LIBC +# define localtime_r __localtime_r +# else +/* Approximate localtime_r as best we can in its absence. */ +# define localtime_r my_localtime_r +static struct tm *localtime_r __P ((const time_t *, struct tm *)); +static struct tm * +localtime_r (t, tp) + const time_t *t; + struct tm *tp; +{ + struct tm *l = localtime (t); + if (! l) + return 0; + *tp = *l; + return tp; +} +# endif /* ! _LIBC */ +#endif /* ! HAVE_LOCALTIME_R && ! defined (localtime_r) */ + + +#define match_char(ch1, ch2) if (ch1 != ch2) return NULL +#if defined __GNUC__ && __GNUC__ >= 2 +# define match_string(cs1, s2) \ + ({ size_t len = strlen (cs1); \ + int result = strncasecmp ((cs1), (s2), len) == 0; \ + if (result) (s2) += len; \ + result; }) +#else +/* Oh come on. Get a reasonable compiler. */ +# define match_string(cs1, s2) \ + (strncasecmp ((cs1), (s2), strlen (cs1)) ? 0 : ((s2) += strlen (cs1), 1)) +#endif +/* We intentionally do not use isdigit() for testing because this will + lead to problems with the wide character version. */ +#define get_number(from, to, n) \ + do { \ + int __n = n; \ + val = 0; \ + while (*rp == ' ') \ + ++rp; \ + if (*rp < '0' || *rp > '9') \ + return NULL; \ + do { \ + val *= 10; \ + val += *rp++ - '0'; \ + } while (--__n > 0 && val * 10 <= to && *rp >= '0' && *rp <= '9'); \ + if (val < from || val > to) \ + return NULL; \ + } while (0) +#ifdef _NL_CURRENT +# define get_alt_number(from, to, n) \ + ({ \ + __label__ do_normal; \ + if (*decided != raw) \ + { \ + const char *alts = _NL_CURRENT (LC_TIME, ALT_DIGITS); \ + int __n = n; \ + int any = 0; \ + while (*rp == ' ') \ + ++rp; \ + val = 0; \ + do { \ + val *= 10; \ + while (*alts != '\0') \ + { \ + size_t len = strlen (alts); \ + if (strncasecmp (alts, rp, len) == 0) \ + break; \ + alts += len + 1; \ + ++val; \ + } \ + if (*alts == '\0') \ + { \ + if (*decided == not && ! any) \ + goto do_normal; \ + /* If we haven't read anything it's an error. */ \ + if (! any) \ + return NULL; \ + /* Correct the premature multiplication. */ \ + val /= 10; \ + break; \ + } \ + else \ + *decided = loc; \ + } while (--__n > 0 && val * 10 <= to); \ + if (val < from || val > to) \ + return NULL; \ + } \ + else \ + { \ + do_normal: \ + get_number (from, to, n); \ + } \ + 0; \ + }) +#else +# define get_alt_number(from, to, n) \ + /* We don't have the alternate representation. */ \ + get_number(from, to, n) +#endif +#define recursive(new_fmt) \ + (*(new_fmt) != '\0' \ + && (rp = strptime_internal (rp, (new_fmt), tm, decided, era_cnt)) != NULL) + + +#ifdef _LIBC +/* This is defined in locale/C-time.c in the GNU libc. 
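When building outside glibc, the static fallback tables in the #else branch below supply the C-locale day and month names instead.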
*/ +extern const struct locale_data _nl_C_LC_TIME; +extern const unsigned short int __mon_yday[2][13]; + +# define weekday_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (DAY_1)].string) +# define ab_weekday_name \ + (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABDAY_1)].string) +# define month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (MON_1)].string) +# define ab_month_name (&_nl_C_LC_TIME.values[_NL_ITEM_INDEX (ABMON_1)].string) +# define HERE_D_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_T_FMT)].string) +# define HERE_D_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (D_FMT)].string) +# define HERE_AM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (AM_STR)].string) +# define HERE_PM_STR (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (PM_STR)].string) +# define HERE_T_FMT_AMPM \ + (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT_AMPM)].string) +# define HERE_T_FMT (_nl_C_LC_TIME.values[_NL_ITEM_INDEX (T_FMT)].string) + +# define strncasecmp(s1, s2, n) __strncasecmp (s1, s2, n) +#else +static char const weekday_name[][10] = + { + "Sunday", "Monday", "Tuesday", "Wednesday", + "Thursday", "Friday", "Saturday" + }; +static char const ab_weekday_name[][4] = + { + "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat" + }; +static char const month_name[][10] = + { + "January", "February", "March", "April", "May", "June", + "July", "August", "September", "October", "November", "December" + }; +static char const ab_month_name[][4] = + { + "Jan", "Feb", "Mar", "Apr", "May", "Jun", + "Jul", "Aug", "Sep", "Oct", "Nov", "Dec" + }; +# define HERE_D_T_FMT "%a %b %e %H:%M:%S %Y" +# define HERE_D_FMT "%m/%d/%y" +# define HERE_AM_STR "AM" +# define HERE_PM_STR "PM" +# define HERE_T_FMT_AMPM "%I:%M:%S %p" +# define HERE_T_FMT "%H:%M:%S" + +static const unsigned short int __mon_yday[2][13] = + { + /* Normal years. */ + { 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + /* Leap years. */ + { 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } + }; +#endif + +/* Status of lookup: do we use the locale data or the raw data? */ +enum locale_status { not, loc, raw }; + + +#ifndef __isleap +/* Nonzero if YEAR is a leap year (every 4 years, + except every 100th isn't, and every 400th is). */ +# define __isleap(year) \ + ((year) % 4 == 0 && ((year) % 100 != 0 || (year) % 400 == 0)) +#endif + +/* Compute the day of the week. */ +static void +day_of_the_week (struct tm *tm) +{ + /* We know that January 1st 1970 was a Thursday (= 4). Compute the + the difference between this data in the one on TM and so determine + the weekday. */ + int corr_year = 1900 + tm->tm_year - (tm->tm_mon < 2); + int wday = (-473 + + (365 * (tm->tm_year - 70)) + + (corr_year / 4) + - ((corr_year / 4) / 25) + ((corr_year / 4) % 25 < 0) + + (((corr_year / 4) / 25) / 4) + + __mon_yday[0][tm->tm_mon] + + tm->tm_mday - 1); + tm->tm_wday = ((wday % 7) + 7) % 7; +} + +/* Compute the day of the year. 
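tm_yday is the cumulative days-before-month value from __mon_yday plus (tm_mday - 1).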
*/ +static void +day_of_the_year (struct tm *tm) +{ + tm->tm_yday = (__mon_yday[__isleap (1900 + tm->tm_year)][tm->tm_mon] + + (tm->tm_mday - 1)); +} + +static char * +#ifdef _LIBC +internal_function +#endif +strptime_internal __P ((const char *rp, const char *fmt, struct tm *tm, + enum locale_status *decided, int era_cnt)); + +static char * +#ifdef _LIBC +internal_function +#endif +strptime_internal (rp, fmt, tm, decided, era_cnt) + const char *rp; + const char *fmt; + struct tm *tm; + enum locale_status *decided; + int era_cnt; +{ + int cnt; + size_t val; + int have_I, is_pm; + int century, want_century; + int want_era; + int have_wday, want_xday; + int have_yday; + int have_mon, have_mday; +#ifdef _NL_CURRENT + const char *rp_backup; + size_t num_eras; + struct era_entry *era; + + era = NULL; +#endif + + have_I = is_pm = 0; + century = -1; + want_century = 0; + want_era = 0; + + have_wday = want_xday = have_yday = have_mon = have_mday = 0; + + while (*fmt != '\0') + { + /* A white space in the format string matches 0 more or white + space in the input string. */ + if (isspace (*fmt)) + { + while (isspace (*rp)) + ++rp; + ++fmt; + continue; + } + + /* Any character but `%' must be matched by the same character + in the input string. */ + if (*fmt != '%') + { + match_char (*fmt++, *rp++); + continue; + } + + ++fmt; +#ifndef _NL_CURRENT + /* We need this for handling the `E' modifier. */ + start_over: +#endif + +#ifdef _NL_CURRENT + /* Make back up of current processing pointer. */ + rp_backup = rp; +#endif + + switch (*fmt++) + { + case '%': + /* Match the `%' character itself. */ + match_char ('%', *rp++); + break; + case 'a': + case 'A': + /* Match day of week. */ + for (cnt = 0; cnt < 7; ++cnt) + { +#ifdef _NL_CURRENT + if (*decided !=raw) + { + if (match_string (_NL_CURRENT (LC_TIME, DAY_1 + cnt), rp)) + { + if (*decided == not + && strcmp (_NL_CURRENT (LC_TIME, DAY_1 + cnt), + weekday_name[cnt])) + *decided = loc; + break; + } + if (match_string (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), rp)) + { + if (*decided == not + && strcmp (_NL_CURRENT (LC_TIME, ABDAY_1 + cnt), + ab_weekday_name[cnt])) + *decided = loc; + break; + } + } +#endif + if (*decided != loc + && (match_string (weekday_name[cnt], rp) + || match_string (ab_weekday_name[cnt], rp))) + { + *decided = raw; + break; + } + } + if (cnt == 7) + /* Does not match a weekday name. */ + return NULL; + tm->tm_wday = cnt; + have_wday = 1; + break; + case 'b': + case 'B': + case 'h': + /* Match month name. */ + for (cnt = 0; cnt < 12; ++cnt) + { +#ifdef _NL_CURRENT + if (*decided !=raw) + { + if (match_string (_NL_CURRENT (LC_TIME, MON_1 + cnt), rp)) + { + if (*decided == not + && strcmp (_NL_CURRENT (LC_TIME, MON_1 + cnt), + month_name[cnt])) + *decided = loc; + break; + } + if (match_string (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), rp)) + { + if (*decided == not + && strcmp (_NL_CURRENT (LC_TIME, ABMON_1 + cnt), + ab_month_name[cnt])) + *decided = loc; + break; + } + } +#endif + if (match_string (month_name[cnt], rp) + || match_string (ab_month_name[cnt], rp)) + { + *decided = raw; + break; + } + } + if (cnt == 12) + /* Does not match a month name. */ + return NULL; + tm->tm_mon = cnt; + want_xday = 1; + break; + case 'c': + /* Match locale's date and time format. 
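This is the %c directive: try the locale's D_T_FMT first, falling back to HERE_D_T_FMT.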
*/ +#ifdef _NL_CURRENT + if (*decided != raw) + { + if (!recursive (_NL_CURRENT (LC_TIME, D_T_FMT))) + { + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + if (*decided == not && + strcmp (_NL_CURRENT (LC_TIME, D_T_FMT), HERE_D_T_FMT)) + *decided = loc; + want_xday = 1; + break; + } + *decided = raw; + } +#endif + if (!recursive (HERE_D_T_FMT)) + return NULL; + want_xday = 1; + break; + case 'C': + /* Match century number. */ +#ifdef _NL_CURRENT + match_century: +#endif + get_number (0, 99, 2); + century = val; + want_xday = 1; + break; + case 'd': + case 'e': + /* Match day of month. */ + get_number (1, 31, 2); + tm->tm_mday = val; + have_mday = 1; + want_xday = 1; + break; + case 'F': + if (!recursive ("%Y-%m-%d")) + return NULL; + want_xday = 1; + break; + case 'x': +#ifdef _NL_CURRENT + if (*decided != raw) + { + if (!recursive (_NL_CURRENT (LC_TIME, D_FMT))) + { + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + if (*decided == not + && strcmp (_NL_CURRENT (LC_TIME, D_FMT), HERE_D_FMT)) + *decided = loc; + want_xday = 1; + break; + } + *decided = raw; + } +#endif + + FALL_THROUGH; + case 'D': + /* Match standard day format. */ + if (!recursive (HERE_D_FMT)) + return NULL; + want_xday = 1; + break; + case 'k': + case 'H': + /* Match hour in 24-hour clock. */ + get_number (0, 23, 2); + tm->tm_hour = val; + have_I = 0; + break; + case 'I': + /* Match hour in 12-hour clock. */ + get_number (1, 12, 2); + tm->tm_hour = val % 12; + have_I = 1; + break; + case 'j': + /* Match day number of year. */ + get_number (1, 366, 3); + tm->tm_yday = val - 1; + have_yday = 1; + break; + case 'm': + /* Match number of month. */ + get_number (1, 12, 2); + tm->tm_mon = val - 1; + have_mon = 1; + want_xday = 1; + break; + case 'M': + /* Match minute. */ + get_number (0, 59, 2); + tm->tm_min = val; + break; + case 'n': + case 't': + /* Match any white space. */ + while (isspace (*rp)) + ++rp; + break; + case 'p': + /* Match locale's equivalent of AM/PM. */ +#ifdef _NL_CURRENT + if (*decided != raw) + { + if (match_string (_NL_CURRENT (LC_TIME, AM_STR), rp)) + { + if (strcmp (_NL_CURRENT (LC_TIME, AM_STR), HERE_AM_STR)) + *decided = loc; + break; + } + if (match_string (_NL_CURRENT (LC_TIME, PM_STR), rp)) + { + if (strcmp (_NL_CURRENT (LC_TIME, PM_STR), HERE_PM_STR)) + *decided = loc; + is_pm = 1; + break; + } + *decided = raw; + } +#endif + if (!match_string (HERE_AM_STR, rp)) { + if (match_string (HERE_PM_STR, rp)) { + is_pm = 1; + } else { + return NULL; + } + } + break; + case 'r': +#ifdef _NL_CURRENT + if (*decided != raw) + { + if (!recursive (_NL_CURRENT (LC_TIME, T_FMT_AMPM))) + { + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + if (*decided == not && + strcmp (_NL_CURRENT (LC_TIME, T_FMT_AMPM), + HERE_T_FMT_AMPM)) + *decided = loc; + break; + } + *decided = raw; + } +#endif + if (!recursive (HERE_T_FMT_AMPM)) + return NULL; + break; + case 'R': + if (!recursive ("%H:%M")) + return NULL; + break; + case 's': + { + /* The number of seconds may be very high so we cannot use + the `get_number' macro. Instead read the number + character for character and construct the result while + doing this. */ + time_t secs = 0; + if (*rp < '0' || *rp > '9') + /* We need at least one digit. */ + return NULL; + + do + { + secs *= 10; + secs += *rp++ - '0'; + } + while (*rp >= '0' && *rp <= '9'); + + if (localtime_r (&secs, tm) == NULL) + /* Error in function. 
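localtime_r() could not convert the epoch seconds just parsed for %s.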
*/ + return NULL; + } + break; + case 'S': + get_number (0, 61, 2); + tm->tm_sec = val; + break; + case 'X': +#ifdef _NL_CURRENT + if (*decided != raw) + { + if (!recursive (_NL_CURRENT (LC_TIME, T_FMT))) + { + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + if (strcmp (_NL_CURRENT (LC_TIME, T_FMT), HERE_T_FMT)) + *decided = loc; + break; + } + *decided = raw; + } +#endif + + FALL_THROUGH; + case 'T': + if (!recursive (HERE_T_FMT)) + return NULL; + break; + case 'u': + get_number (1, 7, 1); + tm->tm_wday = val % 7; + have_wday = 1; + break; + case 'g': + get_number (0, 99, 2); + /* XXX This cannot determine any field in TM. */ + break; + case 'G': + if (*rp < '0' || *rp > '9') + return NULL; + /* XXX Ignore the number since we would need some more + information to compute a real date. */ + do + ++rp; + while (*rp >= '0' && *rp <= '9'); + break; + case 'U': + case 'V': + case 'W': + get_number (0, 53, 2); + /* XXX This cannot determine any field in TM without some + information. */ + break; + case 'w': + /* Match number of weekday. */ + get_number (0, 6, 1); + tm->tm_wday = val; + have_wday = 1; + break; + case 'y': +#ifdef _NL_CURRENT + match_year_in_century: +#endif + /* Match year within century. */ + get_number (0, 99, 2); + /* The "Year 2000: The Millennium Rollover" paper suggests that + values in the range 69-99 refer to the twentieth century. */ + tm->tm_year = val >= 69 ? val : val + 100; + /* Indicate that we want to use the century, if specified. */ + want_century = 1; + want_xday = 1; + break; + case 'Y': + /* Match year including century number. */ + get_number (0, 9999, 4); + tm->tm_year = val - 1900; + want_century = 0; + want_xday = 1; + break; + case 'Z': + /* XXX How to handle this? */ + break; + case 'E': +#ifdef _NL_CURRENT + switch (*fmt++) + { + case 'c': + /* Match locale's alternate date and time format. */ + if (*decided != raw) + { + const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_T_FMT); + + if (*fmt == '\0') + fmt = _NL_CURRENT (LC_TIME, D_T_FMT); + + if (!recursive (fmt)) + { + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + if (strcmp (fmt, HERE_D_T_FMT)) + *decided = loc; + want_xday = 1; + break; + } + *decided = raw; + } + /* The C locale has no era information, so use the + normal representation. */ + if (!recursive (HERE_D_T_FMT)) + return NULL; + want_xday = 1; + break; + case 'C': + if (*decided != raw) + { + if (era_cnt >= 0) + { + era = _nl_select_era_entry (era_cnt); + if (match_string (era->era_name, rp)) + { + *decided = loc; + break; + } + else + return NULL; + } + else + { + num_eras = _NL_CURRENT_WORD (LC_TIME, + _NL_TIME_ERA_NUM_ENTRIES); + for (era_cnt = 0; era_cnt < (int) num_eras; + ++era_cnt, rp = rp_backup) + { + era = _nl_select_era_entry (era_cnt); + if (match_string (era->era_name, rp)) + { + *decided = loc; + break; + } + } + if (era_cnt == (int) num_eras) + { + era_cnt = -1; + if (*decided == loc) + return NULL; + } + else + break; + } + + *decided = raw; + } + /* The C locale has no era information, so use the + normal representation. 
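%EC therefore behaves exactly like plain %C here.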
*/ + goto match_century; + case 'y': + if (*decided == raw) + goto match_year_in_century; + + get_number(0, 9999, 4); + tm->tm_year = val; + want_era = 1; + want_xday = 1; + break; + case 'Y': + if (*decided != raw) + { + num_eras = _NL_CURRENT_WORD (LC_TIME, + _NL_TIME_ERA_NUM_ENTRIES); + for (era_cnt = 0; era_cnt < (int) num_eras; + ++era_cnt, rp = rp_backup) + { + era = _nl_select_era_entry (era_cnt); + if (recursive (era->era_format)) + break; + } + if (era_cnt == (int) num_eras) + { + era_cnt = -1; + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + *decided = loc; + era_cnt = -1; + break; + } + + *decided = raw; + } + get_number (0, 9999, 4); + tm->tm_year = val - 1900; + want_century = 0; + want_xday = 1; + break; + case 'x': + if (*decided != raw) + { + const char *fmt = _NL_CURRENT (LC_TIME, ERA_D_FMT); + + if (*fmt == '\0') + fmt = _NL_CURRENT (LC_TIME, D_FMT); + + if (!recursive (fmt)) + { + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + if (strcmp (fmt, HERE_D_FMT)) + *decided = loc; + break; + } + *decided = raw; + } + if (!recursive (HERE_D_FMT)) + return NULL; + break; + case 'X': + if (*decided != raw) + { + const char *fmt = _NL_CURRENT (LC_TIME, ERA_T_FMT); + + if (*fmt == '\0') + fmt = _NL_CURRENT (LC_TIME, T_FMT); + + if (!recursive (fmt)) + { + if (*decided == loc) + return NULL; + else + rp = rp_backup; + } + else + { + if (strcmp (fmt, HERE_T_FMT)) + *decided = loc; + break; + } + *decided = raw; + } + if (!recursive (HERE_T_FMT)) + return NULL; + break; + default: + return NULL; + } + break; +#else + /* We have no information about the era format. Just use + the normal format. */ + if (*fmt != 'c' && *fmt != 'C' && *fmt != 'y' && *fmt != 'Y' + && *fmt != 'x' && *fmt != 'X') + /* This is an illegal format. */ + return NULL; + + goto start_over; +#endif + case 'O': + switch (*fmt++) + { + case 'd': + case 'e': + /* Match day of month using alternate numeric symbols. */ + get_alt_number (1, 31, 2); + tm->tm_mday = val; + have_mday = 1; + want_xday = 1; + break; + case 'H': + /* Match hour in 24-hour clock using alternate numeric + symbols. */ + get_alt_number (0, 23, 2); + tm->tm_hour = val; + have_I = 0; + break; + case 'I': + /* Match hour in 12-hour clock using alternate numeric + symbols. */ + get_alt_number (1, 12, 2); + tm->tm_hour = val - 1; + have_I = 1; + break; + case 'm': + /* Match month using alternate numeric symbols. */ + get_alt_number (1, 12, 2); + tm->tm_mon = val - 1; + have_mon = 1; + want_xday = 1; + break; + case 'M': + /* Match minutes using alternate numeric symbols. */ + get_alt_number (0, 59, 2); + tm->tm_min = val; + break; + case 'S': + /* Match seconds using alternate numeric symbols. */ + get_alt_number (0, 61, 2); + tm->tm_sec = val; + break; + case 'U': + case 'V': + case 'W': + get_alt_number (0, 53, 2); + /* XXX This cannot determine any field in TM without + further information. */ + break; + case 'w': + /* Match number of weekday using alternate numeric symbols. */ + get_alt_number (0, 6, 1); + tm->tm_wday = val; + have_wday = 1; + break; + case 'y': + /* Match year within century using alternate numeric symbols. */ + get_alt_number (0, 99, 2); + tm->tm_year = val >= 69 ? 
val : val + 100; + want_xday = 1; + break; + default: + return NULL; + } + break; + default: + return NULL; + } + } + + if (have_I && is_pm) + tm->tm_hour += 12; + + if (century != -1) + { + if (want_century) + tm->tm_year = tm->tm_year % 100 + (century - 19) * 100; + else + /* Only the century, but not the year. Strange, but so be it. */ + tm->tm_year = (century - 19) * 100; + } + +#ifdef _NL_CURRENT + if (era_cnt != -1) + { + era = _nl_select_era_entry(era_cnt); + if (want_era) + tm->tm_year = (era->start_date[0] + + ((tm->tm_year - era->offset) + * era->absolute_direction)); + else + /* Era start year assumed. */ + tm->tm_year = era->start_date[0]; + } + else +#endif + if (want_era) + return NULL; + + if (want_xday && !have_wday) + { + if ( !(have_mon && have_mday) && have_yday) + { + /* We don't have tm_mon and/or tm_mday, compute them. */ + int t_mon = 0; + while (__mon_yday[__isleap(1900 + tm->tm_year)][t_mon] <= tm->tm_yday) + t_mon++; + if (!have_mon) + tm->tm_mon = t_mon - 1; + if (!have_mday) + tm->tm_mday = + (tm->tm_yday + - __mon_yday[__isleap(1900 + tm->tm_year)][t_mon - 1] + 1); + } + day_of_the_week (tm); + } + if (want_xday && !have_yday) + day_of_the_year (tm); + + return discard_const_p(char, rp); +} + + +char *rep_strptime(const char *buf, const char *format, struct tm *tm) +{ + enum locale_status decided; + +#ifdef _NL_CURRENT + decided = not; +#else + decided = raw; +#endif + return strptime_internal (buf, format, tm, &decided, -1); +} diff --git a/lib/replace/system/README b/lib/replace/system/README new file mode 100644 index 0000000..69a2b80 --- /dev/null +++ b/lib/replace/system/README @@ -0,0 +1,4 @@ +This directory contains wrappers around logical groups of system +include files. The idea is to avoid #ifdef blocks in the main code, +and instead put all the necessary conditional includes in subsystem +specific header files in this directory. diff --git a/lib/replace/system/capability.h b/lib/replace/system/capability.h new file mode 100644 index 0000000..44b8d51 --- /dev/null +++ b/lib/replace/system/capability.h @@ -0,0 +1,57 @@ +#ifndef _system_capability_h +#define _system_capability_h +/* + Unix SMB/CIFS implementation. + + capability system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/ + +#ifdef HAVE_SYS_CAPABILITY_H + +#if defined(BROKEN_REDHAT_7_SYSTEM_HEADERS) && !defined(_I386_STATFS_H) && !defined(_PPC_STATFS_H) +#define _I386_STATFS_H +#define _PPC_STATFS_H +#define BROKEN_REDHAT_7_STATFS_WORKAROUND +#endif + +#if defined(BROKEN_RHEL5_SYS_CAP_HEADER) && !defined(_LINUX_TYPES_H) +#define BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND +#endif + +#ifdef HAVE_POSIX_CAPABILITIES +#include <sys/capability.h> +#endif + +#ifdef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND +#undef _LINUX_TYPES_H +#undef BROKEN_RHEL5_SYS_CAP_HEADER_WORKAROUND +#endif + +#ifdef BROKEN_REDHAT_7_STATFS_WORKAROUND +#undef _PPC_STATFS_H +#undef _I386_STATFS_H +#undef BROKEN_REDHAT_7_STATFS_WORKAROUND +#endif + +#endif + +#endif diff --git a/lib/replace/system/dir.h b/lib/replace/system/dir.h new file mode 100644 index 0000000..497b5a7 --- /dev/null +++ b/lib/replace/system/dir.h @@ -0,0 +1,71 @@ +#ifndef _system_dir_h +#define _system_dir_h +/* + Unix SMB/CIFS implementation. + + directory system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#ifdef HAVE_DIRENT_H +# include <dirent.h> +# define NAMLEN(dirent) strlen((dirent)->d_name) +#else +# define dirent direct +# define NAMLEN(dirent) (dirent)->d_namlen +# if HAVE_SYS_NDIR_H +# include <sys/ndir.h> +# endif +# if HAVE_SYS_DIR_H +# include <sys/dir.h> +# endif +# if HAVE_NDIR_H +# include <ndir.h> +# endif +#endif + +#ifndef HAVE_MKDIR_MODE +#define mkdir(dir, mode) mkdir(dir) +#endif + +#ifdef HAVE_LIBGEN_H +# include <libgen.h> +#endif + +/* Test whether a file name is the "." or ".." directory entries. + * These really should be inline functions. + */ +#ifndef ISDOT +#define ISDOT(path) ( \ + *((const char *)(path)) == '.' && \ + *(((const char *)(path)) + 1) == '\0' \ + ) +#endif + +#ifndef ISDOTDOT +#define ISDOTDOT(path) ( \ + *((const char *)(path)) == '.' && \ + *(((const char *)(path)) + 1) == '.' && \ + *(((const char *)(path)) + 2) == '\0' \ + ) +#endif + +#endif diff --git a/lib/replace/system/filesys.h b/lib/replace/system/filesys.h new file mode 100644 index 0000000..8005b18 --- /dev/null +++ b/lib/replace/system/filesys.h @@ -0,0 +1,281 @@ +#ifndef _system_filesys_h +#define _system_filesys_h +/* + Unix SMB/CIFS implementation. + + filesystem system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. 
+ + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_UNISTD_H +#include <unistd.h> +#endif + +#include <sys/stat.h> + +#ifdef HAVE_SYS_PARAM_H +#include <sys/param.h> +#endif + +/* This include is required on UNIX (*BSD, AIX, ...) for statfs() */ +#if !defined(LINUX) && defined(HAVE_SYS_MOUNT_H) +#include <sys/mount.h> +#endif + +#ifdef HAVE_MNTENT_H +#include <mntent.h> +#endif + +/* This include is required on Linux for statfs() */ +#ifdef HAVE_SYS_VFS_H +#include <sys/vfs.h> +#endif + +#ifdef HAVE_SYS_ACL_H +#include <sys/acl.h> +#endif + +#ifdef HAVE_ACL_LIBACL_H +#include <acl/libacl.h> +#endif + +#ifdef HAVE_SYS_FS_S5PARAM_H +#include <sys/fs/s5param.h> +#endif + +#if defined (HAVE_SYS_FILSYS_H) && !defined (_CRAY) +#include <sys/filsys.h> +#endif + +#ifdef HAVE_SYS_STATFS_H +# include <sys/statfs.h> +#endif + +#ifdef HAVE_DUSTAT_H +#include <sys/dustat.h> +#endif + +#ifdef HAVE_SYS_STATVFS_H +#include <sys/statvfs.h> +#endif + +#ifdef HAVE_SYS_FILIO_H +#include <sys/filio.h> +#endif + +#ifdef HAVE_SYS_FILE_H +#include <sys/file.h> +#endif + +#ifdef HAVE_FCNTL_H +#include <fcntl.h> +#else +#ifdef HAVE_SYS_FCNTL_H +#include <sys/fcntl.h> +#endif +#endif + +#ifdef HAVE_SYS_MODE_H +/* apparently AIX needs this for S_ISLNK */ +#ifndef S_ISLNK +#include <sys/mode.h> +#endif +#endif + +#ifdef HAVE_SYS_IOCTL_H +#include <sys/ioctl.h> +#endif + +#ifdef HAVE_SYS_UIO_H +#include <sys/uio.h> +#endif + +/* mutually exclusive (SuSE 8.2) */ +#if defined(HAVE_SYS_XATTR_H) +#include <sys/xattr.h> +#elif defined(HAVE_ATTR_XATTR_H) +#include <attr/xattr.h> +#elif defined(HAVE_SYS_ATTRIBUTES_H) +#include <sys/attributes.h> +#elif defined(HAVE_ATTR_ATTRIBUTES_H) +#include <attr/attributes.h> +#endif + +#ifdef HAVE_SYS_EA_H +#include <sys/ea.h> +#endif + +#ifdef HAVE_SYS_EXTATTR_H +#include <sys/extattr.h> +#endif + +#ifdef HAVE_SYS_RESOURCE_H +#include <sys/resource.h> +#endif + +#ifndef XATTR_CREATE +#define XATTR_CREATE 0x1 /* set value, fail if attr already exists */ +#endif + +#ifndef XATTR_REPLACE +#define XATTR_REPLACE 0x2 /* set value, fail if attr does not exist */ +#endif + +/* Some POSIX definitions for those without */ + +#ifndef S_IFDIR +#define S_IFDIR 0x4000 +#endif +#ifndef S_ISDIR +#define S_ISDIR(mode) ((mode & 0xF000) == S_IFDIR) +#endif +#ifndef S_IRWXU +#define S_IRWXU 00700 /* read, write, execute: owner */ +#endif +#ifndef S_IRUSR +#define S_IRUSR 00400 /* read permission: owner */ +#endif +#ifndef S_IWUSR +#define S_IWUSR 00200 /* write permission: owner */ +#endif +#ifndef S_IXUSR +#define S_IXUSR 00100 /* execute permission: owner */ +#endif +#ifndef S_IRWXG +#define S_IRWXG 00070 /* read, write, execute: group */ +#endif +#ifndef S_IRGRP +#define S_IRGRP 00040 /* read permission: group */ +#endif +#ifndef S_IWGRP +#define S_IWGRP 00020 /* write permission: group */ +#endif +#ifndef S_IXGRP +#define S_IXGRP 00010 /* execute permission: group */ +#endif +#ifndef S_IRWXO +#define S_IRWXO 00007 /* read, write, execute: other */ +#endif +#ifndef S_IROTH +#define S_IROTH 00004 /* read permission: other */ +#endif +#ifndef S_IWOTH +#define S_IWOTH 00002 /* write permission: other */ +#endif +#ifndef S_IXOTH +#define S_IXOTH 
00001 /* execute permission: other */ +#endif + +#ifndef O_ACCMODE +#define O_ACCMODE (O_RDONLY | O_WRONLY | O_RDWR) +#endif + +#ifndef MAXPATHLEN +#define MAXPATHLEN 256 +#endif + +#ifndef SEEK_SET +#define SEEK_SET 0 +#endif + +#ifdef _WIN32 +#define mkdir(d,m) _mkdir(d) +#endif + +#ifdef DISABLE_OPATH +#undef O_PATH +#endif + +/* + this allows us to use a uniform error handling for our xattr + wrappers +*/ +#ifndef ENOATTR +#define ENOATTR ENODATA +#endif + + +#if !defined(HAVE_XATTR_XATTR) || defined(XATTR_ADDITIONAL_OPTIONS) + +ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size); +#define getxattr(path, name, value, size) rep_getxattr(path, name, value, size) +/* define is in "replace.h" */ +ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size); +#define fgetxattr(filedes, name, value, size) rep_fgetxattr(filedes, name, value, size) +/* define is in "replace.h" */ +ssize_t rep_listxattr (const char *path, char *list, size_t size); +#define listxattr(path, list, size) rep_listxattr(path, list, size) +/* define is in "replace.h" */ +ssize_t rep_flistxattr (int filedes, char *list, size_t size); +#define flistxattr(filedes, value, size) rep_flistxattr(filedes, value, size) +/* define is in "replace.h" */ +int rep_removexattr (const char *path, const char *name); +#define removexattr(path, name) rep_removexattr(path, name) +/* define is in "replace.h" */ +int rep_fremovexattr (int filedes, const char *name); +#define fremovexattr(filedes, name) rep_fremovexattr(filedes, name) +/* define is in "replace.h" */ +int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags); +#define setxattr(path, name, value, size, flags) rep_setxattr(path, name, value, size, flags) +/* define is in "replace.h" */ +int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags); +#define fsetxattr(filedes, name, value, size, flags) rep_fsetxattr(filedes, name, value, size, flags) +/* define is in "replace.h" */ + +#endif /* !defined(HAVE_XATTR_XATTR) || defined(XATTR_ADDITIONAL_OPTIONS) */ + +#ifdef HAVE_LINUX_OPENAT2_H +#include <linux/openat2.h> +#else /* ! HAVE_LINUX_OPENAT2_H */ +/* how->resolve flags for openat2(2). */ +#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings + (includes bind-mounts). */ +#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style + "magic-links". */ +#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks + (implies OEXT_NO_MAGICLINKS) */ +#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like + "..", symlinks, and absolute + paths which escape the dirfd. */ +#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".." + be scoped inside the dirfd + (similar to chroot(2)). */ +#define RESOLVE_CACHED 0x20 /* Only complete if resolution can be + completed through cached lookup. May + return -EAGAIN if that's not + possible. */ +struct __rep_open_how { + uint64_t flags; + uint64_t mode; + uint64_t resolve; +}; +#define open_how __rep_open_how +#endif /* ! 
HAVE_LINUX_OPENAT2_H */ + +#ifndef HAVE_OPENAT2 +long rep_openat2(int dirfd, const char *pathname, + struct open_how *how, size_t size); +#define openat2(dirfd, pathname, how, size) \ + rep_openat2(dirfd, pathname, how, size) +#endif /* !HAVE_OPENAT2 */ + +#endif diff --git a/lib/replace/system/glob.h b/lib/replace/system/glob.h new file mode 100644 index 0000000..3e23db6 --- /dev/null +++ b/lib/replace/system/glob.h @@ -0,0 +1,37 @@ +#ifndef _system_glob_h +#define _system_glob_h +/* + Unix SMB/CIFS implementation. + + glob system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_GLOB_H +#include <glob.h> +#endif + +#ifdef HAVE_FNMATCH_H +#include <fnmatch.h> +#endif + +#endif diff --git a/lib/replace/system/gssapi.h b/lib/replace/system/gssapi.h new file mode 100644 index 0000000..d8632d7 --- /dev/null +++ b/lib/replace/system/gssapi.h @@ -0,0 +1,53 @@ +#ifndef _system_gssapi_h +#define _system_gssapi_h + +/* + Unix SMB/CIFS implementation. + + GSSAPI system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_GSSAPI + +#ifdef HAVE_GSSAPI_GSSAPI_EXT_H +#include <gssapi/gssapi_ext.h> +#elif defined(HAVE_GSSAPI_GSSAPI_H) +#include <gssapi/gssapi.h> +#elif defined(HAVE_GSSAPI_GSSAPI_GENERIC_H) +#include <gssapi/gssapi_generic.h> +#elif defined(HAVE_GSSAPI_H) +#include <gssapi.h> +#endif + +#ifdef HAVE_GSSAPI_GSSAPI_KRB5_H +#include <gssapi/gssapi_krb5.h> +#endif + +#ifdef HAVE_GSSAPI_GSSAPI_SPNEGO_H +#include <gssapi/gssapi_spnego.h> +#elif defined(HAVE_GSSAPI_SPNEGO_H) +#include <gssapi_spnego.h> +#endif + +#endif +#endif diff --git a/lib/replace/system/iconv.h b/lib/replace/system/iconv.h new file mode 100644 index 0000000..3c8a71f --- /dev/null +++ b/lib/replace/system/iconv.h @@ -0,0 +1,57 @@ +#ifndef _system_iconv_h +#define _system_iconv_h +/* + Unix SMB/CIFS implementation. 
+ + iconv memory system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#if !defined(HAVE_ICONV) && defined(HAVE_ICONV_H) +#define HAVE_ICONV +#endif + +#if !defined(HAVE_GICONV) && defined(HAVE_GICONV_H) +#define HAVE_GICONV +#endif + +#if !defined(HAVE_BICONV) && defined(HAVE_BICONV_H) +#define HAVE_BICONV +#endif + +#ifdef HAVE_NATIVE_ICONV +#if defined(HAVE_ICONV) +#include <iconv.h> +#elif defined(HAVE_GICONV) +#include <giconv.h> +#elif defined(HAVE_BICONV) +#include <biconv.h> +#endif +#endif /* HAVE_NATIVE_ICONV */ + +/* needed for some systems without iconv. Doesn't really matter + what error code we use */ +#ifndef EILSEQ +#define EILSEQ EIO +#endif + +#endif diff --git a/lib/replace/system/kerberos.h b/lib/replace/system/kerberos.h new file mode 100644 index 0000000..ebd8657 --- /dev/null +++ b/lib/replace/system/kerberos.h @@ -0,0 +1,41 @@ +#ifndef _system_kerberos_h +#define _system_kerberos_h + +/* + Unix SMB/CIFS implementation. + + kerberos system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_KRB5 + +#ifdef HAVE_KRB5_H +#include <krb5.h> +#endif + +#ifdef HAVE_COM_ERR_H +#include <com_err.h> +#endif + +#endif +#endif diff --git a/lib/replace/system/locale.h b/lib/replace/system/locale.h new file mode 100644 index 0000000..504a3bb --- /dev/null +++ b/lib/replace/system/locale.h @@ -0,0 +1,42 @@ +#ifndef _system_locale_h +#define _system_locale_h + +/* + Unix SMB/CIFS implementation. + + locale include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. 
+ + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_CTYPE_H +#include <ctype.h> +#endif + +#ifdef HAVE_LOCALE_H +#include <locale.h> +#endif + +#ifdef HAVE_LANGINFO_H +#include <langinfo.h> +#endif + +#endif diff --git a/lib/replace/system/network.h b/lib/replace/system/network.h new file mode 100644 index 0000000..1721d65 --- /dev/null +++ b/lib/replace/system/network.h @@ -0,0 +1,391 @@ +#ifndef _system_network_h +#define _system_network_h +/* + Unix SMB/CIFS implementation. + + networking system include wrappers + + Copyright (C) Andrew Tridgell 2004 + Copyright (C) Jelmer Vernooij 2007 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifndef LIBREPLACE_NETWORK_CHECKS +#error "AC_LIBREPLACE_NETWORK_CHECKS missing in configure" +#endif + +#ifdef HAVE_UNISTD_H +#include <unistd.h> +#endif + +#ifdef HAVE_SYS_SOCKET_H +#include <sys/socket.h> +#endif + +#ifdef HAVE_UNIXSOCKET +#include <sys/un.h> +#endif + +#ifdef HAVE_NETINET_IN_H +#include <netinet/in.h> +#endif +#ifdef HAVE_ARPA_INET_H +#include <arpa/inet.h> +#endif + +#ifdef HAVE_NETDB_H +#include <netdb.h> +#endif + +#ifdef HAVE_NETINET_TCP_H +#include <netinet/tcp.h> +#endif + +/* + * The next three defines are needed to access the IPTOS_* options + * on some systems. 
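+ *
+ * (IPTOS_LOWDELAY and IPTOS_THROUGHPUT are the usual examples: once
+ * <netinet/ip.h> is visible, callers can set the IPv4 type-of-service
+ * byte through the standard setsockopt() interface.  A minimal sketch,
+ * assuming an already-open AF_INET socket fd:
+ *
+ *     int tos = IPTOS_LOWDELAY;
+ *     setsockopt(fd, IPPROTO_IP, IP_TOS, &tos, sizeof(tos));
+ *
+ * Nothing in this header depends on that call; the includes below only
+ * make the IPTOS_* constants reachable.)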
+ */ + +#ifdef HAVE_NETINET_IN_SYSTM_H +#include <netinet/in_systm.h> +#endif + +#ifdef HAVE_NETINET_IN_IP_H +#include <netinet/in_ip.h> +#endif + +#ifdef HAVE_NETINET_IP_H +#include <netinet/ip.h> +#endif + +#ifdef HAVE_NET_IF_H +#include <net/if.h> +#endif + +#ifdef HAVE_SYS_IOCTL_H +#include <sys/ioctl.h> +#endif + +#ifdef HAVE_SYS_UIO_H +#include <sys/uio.h> +#endif + +#ifdef HAVE_STROPTS_H +#include <stropts.h> +#endif + +#include <limits.h> + +#ifndef HAVE_SOCKLEN_T +#define HAVE_SOCKLEN_T +typedef int socklen_t; +#endif + +#if !defined (HAVE_INET_NTOA) || defined(REPLACE_INET_NTOA) +/* define is in "replace.h" */ +char *rep_inet_ntoa(struct in_addr ip); +#endif + +#ifndef HAVE_INET_PTON +/* define is in "replace.h" */ +int rep_inet_pton(int af, const char *src, void *dst); +#endif + +#ifndef HAVE_INET_NTOP +/* define is in "replace.h" */ +const char *rep_inet_ntop(int af, const void *src, char *dst, socklen_t size); +#endif + +#ifndef HAVE_INET_ATON +/* define is in "replace.h" */ +int rep_inet_aton(const char *src, struct in_addr *dst); +#endif + +#ifndef HAVE_CONNECT +/* define is in "replace.h" */ +int rep_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen); +#endif + +#ifndef HAVE_GETHOSTBYNAME +/* define is in "replace.h" */ +struct hostent *rep_gethostbyname(const char *name); +#endif + +#ifdef HAVE_IFADDRS_H +#include <ifaddrs.h> +#endif + +#ifndef HAVE_STRUCT_IFADDRS +struct ifaddrs { + struct ifaddrs *ifa_next; /* Pointer to next struct */ + char *ifa_name; /* Interface name */ + unsigned int ifa_flags; /* Interface flags */ + struct sockaddr *ifa_addr; /* Interface address */ + struct sockaddr *ifa_netmask; /* Interface netmask */ +#undef ifa_dstaddr + struct sockaddr *ifa_dstaddr; /* P2P interface destination */ + void *ifa_data; /* Address specific data */ +}; +#endif + +#ifndef HAVE_GETIFADDRS +int rep_getifaddrs(struct ifaddrs **); +#endif + +#ifndef HAVE_FREEIFADDRS +void rep_freeifaddrs(struct ifaddrs *); +#endif + +#ifndef HAVE_SOCKETPAIR +/* define is in "replace.h" */ +int rep_socketpair(int d, int type, int protocol, int sv[2]); +#endif + +/* + * Some systems have getaddrinfo but not the + * defines needed to use it. + */ + +/* Various macros that ought to be in <netdb.h>, but might not be */ + +#ifndef EAI_FAIL +#define EAI_BADFLAGS (-1) +#define EAI_NONAME (-2) +#define EAI_AGAIN (-3) +#define EAI_FAIL (-4) +#define EAI_FAMILY (-6) +#define EAI_SOCKTYPE (-7) +#define EAI_SERVICE (-8) +#define EAI_MEMORY (-10) +#define EAI_SYSTEM (-11) +#endif /* !EAI_FAIL */ + +#ifndef AI_PASSIVE +#define AI_PASSIVE 0x0001 +#endif + +#ifndef AI_CANONNAME +#define AI_CANONNAME 0x0002 +#endif + +#ifndef AI_NUMERICHOST +/* + * some platforms don't support AI_NUMERICHOST; define as zero if using + * the system version of getaddrinfo... + */ +#if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) +#define AI_NUMERICHOST 0 +#else +#define AI_NUMERICHOST 0x0004 +#endif +#endif + +/* + * Some of the functions in source3/lib/util_sock.c use AI_ADDRCONFIG. On QNX + * 6.3.0, this macro is defined but, if it's used, getaddrinfo will fail. This + * prevents smbd from opening any sockets. + * + * If I undefine AI_ADDRCONFIG on such systems and define it to be 0, + * this works around the issue. 
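+ *
+ * Defining it to 0, rather than leaving it undefined, keeps callers
+ * source-compatible: a hints setup such as
+ *
+ *     struct addrinfo hints = { 0 };
+ *     hints.ai_flags = AI_PASSIVE | AI_ADDRCONFIG;
+ *
+ * (an illustrative fragment, not code from this file) still compiles
+ * everywhere, and on the affected platforms the flag simply
+ * contributes no bits.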
+ */ +#ifdef __QNX__ +#include <sys/neutrino.h> +#if _NTO_VERSION == 630 +#undef AI_ADDRCONFIG +#endif +#endif +#ifndef AI_ADDRCONFIG +/* + * logic copied from AI_NUMERICHOST + */ +#if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) +#define AI_ADDRCONFIG 0 +#else +#define AI_ADDRCONFIG 0x0020 +#endif +#endif + +#ifndef AI_NUMERICSERV +/* + * logic copied from AI_NUMERICHOST + */ +#if defined(HAVE_STRUCT_ADDRINFO) && defined(HAVE_GETADDRINFO) +#define AI_NUMERICSERV 0 +#else +#define AI_NUMERICSERV 0x0400 +#endif +#endif + +#ifndef NI_NUMERICHOST +#define NI_NUMERICHOST 1 +#endif + +#ifndef NI_NUMERICSERV +#define NI_NUMERICSERV 2 +#endif + +#ifndef NI_NOFQDN +#define NI_NOFQDN 4 +#endif + +#ifndef NI_NAMEREQD +#define NI_NAMEREQD 8 +#endif + +#ifndef NI_DGRAM +#define NI_DGRAM 16 +#endif + + +#ifndef NI_MAXHOST +#define NI_MAXHOST 1025 +#endif + +#ifndef NI_MAXSERV +#define NI_MAXSERV 32 +#endif + +/* + * glibc on linux doesn't seem to have MSG_WAITALL + * defined. I think the kernel has it though.. + */ +#ifndef MSG_WAITALL +#define MSG_WAITALL 0 +#endif + +#ifndef INADDR_LOOPBACK +#define INADDR_LOOPBACK 0x7f000001 +#endif + +#ifndef INADDR_NONE +#define INADDR_NONE 0xffffffff +#endif + +#ifndef EAFNOSUPPORT +#define EAFNOSUPPORT EINVAL +#endif + +#ifndef INET_ADDRSTRLEN +#define INET_ADDRSTRLEN 16 +#endif + +#ifndef INET6_ADDRSTRLEN +#define INET6_ADDRSTRLEN 46 +#endif + +#ifndef HOST_NAME_MAX +#define HOST_NAME_MAX 255 +#endif + +#ifndef MAXHOSTNAMELEN +#define MAXHOSTNAMELEN HOST_NAME_MAX +#endif + +#ifndef HAVE_SA_FAMILY_T +#define HAVE_SA_FAMILY_T +typedef unsigned short int sa_family_t; +#endif + +#ifndef HAVE_STRUCT_SOCKADDR_STORAGE +#define HAVE_STRUCT_SOCKADDR_STORAGE +#ifdef HAVE_STRUCT_SOCKADDR_IN6 +#define sockaddr_storage sockaddr_in6 +#define ss_family sin6_family +#define HAVE_SS_FAMILY 1 +#else /*HAVE_STRUCT_SOCKADDR_IN6*/ +#define sockaddr_storage sockaddr_in +#define ss_family sin_family +#define HAVE_SS_FAMILY 1 +#endif /*HAVE_STRUCT_SOCKADDR_IN6*/ +#endif /*HAVE_STRUCT_SOCKADDR_STORAGE*/ + +#ifndef HAVE_SS_FAMILY +#ifdef HAVE___SS_FAMILY +#define ss_family __ss_family +#define HAVE_SS_FAMILY 1 +#endif +#endif + +#ifndef IOV_MAX +# ifdef UIO_MAXIOV +# define IOV_MAX UIO_MAXIOV +# else +# ifdef __sgi + /* + * IRIX 6.5 has sysconf(_SC_IOV_MAX) + * which might return 512 or bigger + */ +# define IOV_MAX 512 +# endif +# ifdef __GNU__ + /* + * GNU/Hurd does not have such hardcoded limitations. Use a reasonable + * amount. + */ +# define IOV_MAX 512 +# endif +# endif +#endif + +#ifndef PIPE_BUF +# ifdef __GNU__ + /* + * GNU/Hurd does not have such hardcoded limitations. But it has to support + * the minimum POSIX value anyway. + */ +# define PIPE_BUF 512 +# endif +#endif + +#ifndef HAVE_STRUCT_ADDRINFO +#define HAVE_STRUCT_ADDRINFO +struct addrinfo { + int ai_flags; + int ai_family; + int ai_socktype; + int ai_protocol; + socklen_t ai_addrlen; + struct sockaddr *ai_addr; + char *ai_canonname; + struct addrinfo *ai_next; +}; +#endif /* HAVE_STRUCT_ADDRINFO */ + +#if !defined(HAVE_GETADDRINFO) +#include "getaddrinfo.h" +#endif + +/* Needed for some systems that don't define it (Solaris). 
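+   On such systems the SIOCGIFNETMASK ioctl reports the mask through the
+   generic ifr_addr member of struct ifreq, so aliasing ifr_netmask to
+   ifr_addr lets portable code keep a single spelling.  A sketch, assuming
+   an AF_INET interface whose name is already set in ifr:
+
+       if (ioctl(fd, SIOCGIFNETMASK, &ifr) == 0) {
+           struct sockaddr_in *m = (struct sockaddr_in *)&ifr.ifr_netmask;
+           ... m->sin_addr now holds the interface netmask ...
+       }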
*/ +#ifndef ifr_netmask +#define ifr_netmask ifr_addr +#endif + +/* Some old Linux systems have broken header files */ +#ifdef HAVE_IPV6 +#ifdef HAVE_LINUX_IPV6_V6ONLY_26 +#define IPV6_V6ONLY 26 +#endif /* HAVE_LINUX_IPV6_V6ONLY_26 */ +#endif /* HAVE_IPV6 */ + +#ifndef SCOPE_DELIMITER +#define SCOPE_DELIMITER '%' +#endif + +#endif diff --git a/lib/replace/system/passwd.h b/lib/replace/system/passwd.h new file mode 100644 index 0000000..ecc9f60 --- /dev/null +++ b/lib/replace/system/passwd.h @@ -0,0 +1,92 @@ +#ifndef _system_passwd_h +#define _system_passwd_h + +/* + Unix SMB/CIFS implementation. + + passwd system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_UNISTD_H +#include <unistd.h> +#endif + +#ifdef HAVE_PWD_H +#include <pwd.h> +#endif +#ifdef HAVE_GRP_H +#include <grp.h> +#endif +#ifdef HAVE_SYS_PRIV_H +#include <sys/priv.h> +#endif +#ifdef HAVE_SYS_ID_H +#include <sys/id.h> +#endif + +#ifdef HAVE_CRYPT_H +#include <crypt.h> +#endif + +#ifdef HAVE_SHADOW_H +#include <shadow.h> +#endif + +#ifdef HAVE_SYS_SECURITY_H +#include <sys/security.h> +#include <prot.h> +#define PASSWORD_LENGTH 16 +#endif /* HAVE_SYS_SECURITY_H */ + +#ifdef HAVE_GETPWANAM +#include <sys/label.h> +#include <sys/audit.h> +#include <pwdadj.h> +#endif + +#ifdef HAVE_COMPAT_H +#include <compat.h> +#endif + +#ifndef NGROUPS_MAX +#define NGROUPS_MAX 32 /* Guess... */ +#endif + +/* what is the longest significant password available on your system? + Knowing this speeds up password searches a lot */ +#ifndef PASSWORD_LENGTH +#define PASSWORD_LENGTH 8 +#endif + + +#ifndef ALLOW_CHANGE_PASSWORD +#if (defined(HAVE_TERMIOS_H) && defined(HAVE_DUP2) && defined(HAVE_SETSID)) +#define ALLOW_CHANGE_PASSWORD 1 +#endif +#endif + +#if defined(HAVE_CRYPT16) && defined(HAVE_GETAUTHUID) +#define ULTRIX_AUTH 1 +#endif + +#endif diff --git a/lib/replace/system/python.h b/lib/replace/system/python.h new file mode 100644 index 0000000..b242bae --- /dev/null +++ b/lib/replace/system/python.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2023 Andreas Schneider <asn@samba.org> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _SAMBA_PYTHON_H +#define _SAMBA_PYTHON_H + +/* + * With Python 3.6 Cpython started to require C99. With Python 3.12 they + * started to mix code and variable declarations so disable the warnings. + */ +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wdeclaration-after-statement" +#include <Python.h> +#pragma GCC diagnostic pop + +#endif /* _SAMBA_PYTHON_H */ diff --git a/lib/replace/system/readline.h b/lib/replace/system/readline.h new file mode 100644 index 0000000..2937962 --- /dev/null +++ b/lib/replace/system/readline.h @@ -0,0 +1,61 @@ +#ifndef _system_readline_h +#define _system_readline_h +/* + Unix SMB/CIFS implementation. + + Readline wrappers + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_LIBREADLINE +# ifdef HAVE_READLINE_READLINE_H +# ifdef HAVE_READLINE_READLINE_WORKAROUND +# define _FUNCTION_DEF +# endif +# include <readline/readline.h> +# ifdef HAVE_READLINE_HISTORY_H +# include <readline/history.h> +# endif +# else +# ifdef HAVE_READLINE_H +# include <readline.h> +# ifdef HAVE_HISTORY_H +# include <history.h> +# endif +# else +# undef HAVE_LIBREADLINE +# endif +# endif +#endif + +#ifdef HAVE_NEW_LIBREADLINE +#ifdef HAVE_CPPFUNCTION +# define RL_COMPLETION_CAST (CPPFunction *) +#elif defined(HAVE_RL_COMPLETION_T) +# define RL_COMPLETION_CAST (rl_completion_t *) +#else +# define RL_COMPLETION_CAST +#endif +#else +/* This type is missing from libreadline<4.0 (approximately) */ +# define RL_COMPLETION_CAST +#endif /* HAVE_NEW_LIBREADLINE */ + +#endif diff --git a/lib/replace/system/select.h b/lib/replace/system/select.h new file mode 100644 index 0000000..11c5390 --- /dev/null +++ b/lib/replace/system/select.h @@ -0,0 +1,77 @@ +#ifndef _system_select_h +#define _system_select_h +/* + Unix SMB/CIFS implementation. + + select system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ +*/ + +#ifdef HAVE_SYS_SELECT_H +#include <sys/select.h> +#endif + +#ifdef HAVE_SYS_EPOLL_H +#include <sys/epoll.h> +#endif + +#ifndef SELECT_CAST +#define SELECT_CAST +#endif + +#ifdef HAVE_POLL + +#include <poll.h> + +#else + +/* Type used for the number of file descriptors. */ +typedef unsigned long int nfds_t; + +/* Data structure describing a polling request. */ +struct pollfd +{ + int fd; /* File descriptor to poll. */ + short int events; /* Types of events poller cares about. */ + short int revents; /* Types of events that actually occurred. */ +}; + +/* Event types that can be polled for. These bits may be set in `events' + to indicate the interesting event types; they will appear in `revents' + to indicate the status of the file descriptor. */ +#define POLLIN 0x001 /* There is data to read. */ +#define POLLPRI 0x002 /* There is urgent data to read. */ +#define POLLOUT 0x004 /* Writing now will not block. */ +#define POLLRDNORM 0x040 /* Normal data may be read. */ +#define POLLRDBAND 0x080 /* Priority data may be read. */ +#define POLLWRNORM 0x100 /* Writing now will not block. */ +#define POLLWRBAND 0x200 /* Priority data may be written. */ +#define POLLERR 0x008 /* Error condition. */ +#define POLLHUP 0x010 /* Hung up. */ +#define POLLNVAL 0x020 /* Invalid polling request. */ + +/* define is in "replace.h" */ +int rep_poll(struct pollfd *fds, nfds_t nfds, int timeout); + +#endif + +#endif diff --git a/lib/replace/system/shmem.h b/lib/replace/system/shmem.h new file mode 100644 index 0000000..64fe39b --- /dev/null +++ b/lib/replace/system/shmem.h @@ -0,0 +1,59 @@ +#ifndef _system_shmem_h +#define _system_shmem_h +/* + Unix SMB/CIFS implementation. + + shared memory system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#if defined(HAVE_SYS_IPC_H) +#include <sys/ipc.h> +#endif /* HAVE_SYS_IPC_H */ + +#if defined(HAVE_SYS_SHM_H) +#include <sys/shm.h> +#endif /* HAVE_SYS_SHM_H */ + +#ifdef HAVE_SYS_MMAN_H +#include <sys/mman.h> +#endif + +/* NetBSD doesn't have these */ +#ifndef SHM_R +#define SHM_R 0400 +#endif + +#ifndef SHM_W +#define SHM_W 0200 +#endif + + +#ifndef MAP_FILE +#define MAP_FILE 0 +#endif + +#ifndef MAP_FAILED +#define MAP_FAILED ((void *)-1) +#endif + +#endif diff --git a/lib/replace/system/syslog.h b/lib/replace/system/syslog.h new file mode 100644 index 0000000..104be1d --- /dev/null +++ b/lib/replace/system/syslog.h @@ -0,0 +1,70 @@ +#ifndef _system_syslog_h +#define _system_syslog_h +/* + Unix SMB/CIFS implementation. + + syslog system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. 
This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef HAVE_SYSLOG_H +#include <syslog.h> +#else +#ifdef HAVE_SYS_SYSLOG_H +#include <sys/syslog.h> +#endif +#endif + +/* For sys_adminlog(). */ +#ifndef LOG_EMERG +#define LOG_EMERG 0 /* system is unusable */ +#endif + +#ifndef LOG_ALERT +#define LOG_ALERT 1 /* action must be taken immediately */ +#endif + +#ifndef LOG_CRIT +#define LOG_CRIT 2 /* critical conditions */ +#endif + +#ifndef LOG_ERR +#define LOG_ERR 3 /* error conditions */ +#endif + +#ifndef LOG_WARNING +#define LOG_WARNING 4 /* warning conditions */ +#endif + +#ifndef LOG_NOTICE +#define LOG_NOTICE 5 /* normal but significant condition */ +#endif + +#ifndef LOG_INFO +#define LOG_INFO 6 /* informational */ +#endif + +#ifndef LOG_DEBUG +#define LOG_DEBUG 7 /* debug-level messages */ +#endif + +#endif diff --git a/lib/replace/system/terminal.h b/lib/replace/system/terminal.h new file mode 100644 index 0000000..9ad601a --- /dev/null +++ b/lib/replace/system/terminal.h @@ -0,0 +1,46 @@ +#ifndef _system_terminal_h +#define _system_terminal_h +/* + Unix SMB/CIFS implementation. + + terminal system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. + +*/ + +#ifdef SUNOS4 +/* on SUNOS4 termios.h conflicts with sys/ioctl.h */ +#undef HAVE_TERMIOS_H +#endif + + +#if defined(HAVE_TERMIOS_H) +/* POSIX terminal handling. */ +#include <termios.h> +#elif defined(HAVE_TERMIO_H) +/* Older SYSV terminal handling - don't use if we can avoid it. */ +#include <termio.h> +#elif defined(HAVE_SYS_TERMIO_H) +/* Older SYSV terminal handling - don't use if we can avoid it. */ +#include <sys/termio.h> +#endif + +#endif diff --git a/lib/replace/system/threads.h b/lib/replace/system/threads.h new file mode 100644 index 0000000..d189ed6 --- /dev/null +++ b/lib/replace/system/threads.h @@ -0,0 +1,72 @@ +#ifndef _system_threads_h +#define _system_threads_h +/* + Unix SMB/CIFS implementation. + + macros to go along with the lib/replace/ portability layer code + + Copyright (C) Volker Lendecke 2012 + + ** NOTE! 
The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include <pthread.h> + +#if defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP) && \ + !defined(HAVE_PTHREAD_MUTEXATTR_SETROBUST) +#define pthread_mutexattr_setrobust pthread_mutexattr_setrobust_np +#endif + +#if defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP) && \ + !defined(HAVE_DECL_PTHREAD_MUTEX_ROBUST) +#define PTHREAD_MUTEX_ROBUST PTHREAD_MUTEX_ROBUST_NP +#endif + +#if defined(HAVE_PTHREAD_MUTEX_CONSISTENT_NP) && \ + !defined(HAVE_PTHREAD_MUTEX_CONSISTENT) +#define pthread_mutex_consistent pthread_mutex_consistent_np +#endif + +#ifdef HAVE_STDATOMIC_H +#include <stdatomic.h> +#endif + +#ifndef HAVE_ATOMIC_THREAD_FENCE +#ifdef HAVE___ATOMIC_THREAD_FENCE +#define atomic_thread_fence(__ignore_order) __atomic_thread_fence(__ATOMIC_SEQ_CST) +#define HAVE_ATOMIC_THREAD_FENCE 1 +#endif /* HAVE___ATOMIC_THREAD_FENCE */ +#endif /* not HAVE_ATOMIC_THREAD_FENCE */ + +#ifndef HAVE_ATOMIC_THREAD_FENCE +#ifdef HAVE___SYNC_SYNCHRONIZE +#define atomic_thread_fence(__ignore_order) __sync_synchronize() +#define HAVE_ATOMIC_THREAD_FENCE 1 +#endif /* HAVE___SYNC_SYNCHRONIZE */ +#endif /* not HAVE_ATOMIC_THREAD_FENCE */ + +#ifndef HAVE_ATOMIC_THREAD_FENCE +#ifdef HAVE_ATOMIC_THREAD_FENCE_SUPPORT +#error mismatch_error_between_configure_test_and_header +#endif +/* make sure the build fails if someone uses it without checking the define */ +#define atomic_thread_fence(__order) \ + __function__atomic_thread_fence_not_available_on_this_platform__() +#endif /* not HAVE_ATOMIC_THREAD_FENCE */ + +#endif diff --git a/lib/replace/system/time.h b/lib/replace/system/time.h new file mode 100644 index 0000000..272fe84 --- /dev/null +++ b/lib/replace/system/time.h @@ -0,0 +1,106 @@ +#ifndef _system_time_h +#define _system_time_h +/* + Unix SMB/CIFS implementation. + + time system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ +*/ + +#ifdef TIME_WITH_SYS_TIME +#include <sys/time.h> +#include <time.h> +#else +#ifdef HAVE_SYS_TIME_H +#include <sys/time.h> +#else +#include <time.h> +#endif +#endif + +#ifdef HAVE_UTIME_H +#include <utime.h> +#else +struct utimbuf { + time_t actime; /* access time */ + time_t modtime; /* modification time */ +}; +#endif + +#ifndef HAVE_STRUCT_TIMESPEC +struct timespec { + time_t tv_sec; /* Seconds. */ + long tv_nsec; /* Nanoseconds. */ +}; +#endif + +#ifndef HAVE_MKTIME +/* define is in "replace.h" */ +time_t rep_mktime(struct tm *t); +#endif + +#ifndef HAVE_TIMEGM +/* define is in "replace.h" */ +time_t rep_timegm(struct tm *tm); +#endif + +#ifndef HAVE_UTIME +/* define is in "replace.h" */ +int rep_utime(const char *filename, const struct utimbuf *buf); +#endif + +#ifndef HAVE_UTIMES +/* define is in "replace.h" */ +int rep_utimes(const char *filename, const struct timeval tv[2]); +#endif + +#ifndef HAVE_CLOCK_GETTIME +/* CLOCK_REALTIME is required by POSIX */ +#define CLOCK_REALTIME 0 +typedef int clockid_t; +int rep_clock_gettime(clockid_t clk_id, struct timespec *tp); +#endif +/* make sure we have a best effort CUSTOM_CLOCK_MONOTONIC we can rely on. + * + * on AIX the values of CLOCK_* are cast expressions, not integer constants, + * this prevents them from being compared against in a preprocessor directive. + * The following ...IS_* macros can be used to check which clock is in use. + */ +#if defined(CLOCK_MONOTONIC) +#define CUSTOM_CLOCK_MONOTONIC CLOCK_MONOTONIC +#define CUSTOM_CLOCK_MONOTONIC_IS_MONOTONIC +#elif defined(CLOCK_HIGHRES) +#define CUSTOM_CLOCK_MONOTONIC CLOCK_HIGHRES +#define CUSTOM_CLOCK_MONOTONIC_IS_HIGHRES +#else +#define CUSTOM_CLOCK_MONOTONIC CLOCK_REALTIME +#define CUSTOM_CLOCK_MONOTONIC_IS_REALTIME +#endif + +#ifndef UTIME_NOW +#define UTIME_NOW ((1l << 30) - 1l) +#endif +#ifndef UTIME_OMIT +#define UTIME_OMIT ((1l << 30) - 2l) +#endif + +#endif diff --git a/lib/replace/system/wait.h b/lib/replace/system/wait.h new file mode 100644 index 0000000..1f5fcd9 --- /dev/null +++ b/lib/replace/system/wait.h @@ -0,0 +1,55 @@ +#ifndef _system_wait_h +#define _system_wait_h +/* + Unix SMB/CIFS implementation. + + waitpid system include wrappers + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ +*/ + +#ifdef HAVE_SYS_WAIT_H +#include <sys/wait.h> +#endif + +#include <signal.h> + +#ifndef SIGCLD +#define SIGCLD SIGCHLD +#endif + +#ifdef HAVE_SETJMP_H +#include <setjmp.h> +#endif + +#ifdef HAVE_SYS_UCONTEXT_H +#include <sys/ucontext.h> +#endif + +#if !defined(HAVE_SIG_ATOMIC_T_TYPE) +typedef int sig_atomic_t; +#endif + +#if !defined(HAVE_WAITPID) && defined(HAVE_WAIT4) +int rep_waitpid(pid_t pid,int *status,int options); +#endif + +#endif diff --git a/lib/replace/system/wscript_configure b/lib/replace/system/wscript_configure new file mode 100644 index 0000000..b51d917 --- /dev/null +++ b/lib/replace/system/wscript_configure @@ -0,0 +1,18 @@ +#!/usr/bin/env python + +# solaris variants of getXXent_r +conf.CHECK_C_PROTOTYPE('getpwent_r', + 'struct passwd *getpwent_r(struct passwd *src, char *buf, int buflen)', + define='SOLARIS_GETPWENT_R', headers='pwd.h') +conf.CHECK_C_PROTOTYPE('getgrent_r', + 'struct group *getgrent_r(struct group *src, char *buf, int buflen)', + define='SOLARIS_GETGRENT_R', headers='grp.h') + +# the irix variants +conf.CHECK_C_PROTOTYPE('getpwent_r', + 'struct passwd *getpwent_r(struct passwd *src, char *buf, size_t buflen)', + define='SOLARIS_GETPWENT_R', headers='pwd.h') +conf.CHECK_C_PROTOTYPE('getgrent_r', + 'struct group *getgrent_r(struct group *src, char *buf, size_t buflen)', + define='SOLARIS_GETGRENT_R', headers='grp.h') + diff --git a/lib/replace/tests/getifaddrs.c b/lib/replace/tests/getifaddrs.c new file mode 100644 index 0000000..8d575af --- /dev/null +++ b/lib/replace/tests/getifaddrs.c @@ -0,0 +1,105 @@ +/* + * Unix SMB/CIFS implementation. + * + * libreplace getifaddrs test + * + * Copyright (C) Michael Adam <obnox@samba.org> 2008 + * + * ** NOTE! The following LGPL license applies to the replace + * ** library. This does NOT imply that all of Samba is released + * ** under the LGPL + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 3 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef AUTOCONF_TEST +#include "replace.h" +#include "system/network.h" +#include "replace-test.h" +#endif + +#ifdef HAVE_INET_NTOP +#define rep_inet_ntop inet_ntop +#endif + +static const char *format_sockaddr(struct sockaddr *addr, + char *addrstring, + socklen_t addrlen) +{ + const char *result = NULL; + + if (addr->sa_family == AF_INET) { + result = rep_inet_ntop(AF_INET, + &((struct sockaddr_in *)addr)->sin_addr, + addrstring, + addrlen); +#ifdef HAVE_STRUCT_SOCKADDR_IN6 + } else if (addr->sa_family == AF_INET6) { + result = rep_inet_ntop(AF_INET6, + &((struct sockaddr_in6 *)addr)->sin6_addr, + addrstring, + addrlen); +#endif + } + return result; +} + +int getifaddrs_test(void) +{ + struct ifaddrs *ifs = NULL; + struct ifaddrs *ifs_head = NULL; + int ret; + + ret = getifaddrs(&ifs); + ifs_head = ifs; + if (ret != 0) { + fprintf(stderr, "getifaddrs() failed: %s\n", strerror(errno)); + return 1; + } + + while (ifs) { + printf("%-10s ", ifs->ifa_name); + if (ifs->ifa_addr != NULL) { + char addrstring[INET6_ADDRSTRLEN]; + const char *result; + + result = format_sockaddr(ifs->ifa_addr, + addrstring, + sizeof(addrstring)); + if (result != NULL) { + printf("IP=%s ", addrstring); + } + + if (ifs->ifa_netmask != NULL) { + result = format_sockaddr(ifs->ifa_netmask, + addrstring, + sizeof(addrstring)); + if (result != NULL) { + printf("NETMASK=%s", addrstring); + } + } else { + printf("AF=%d ", ifs->ifa_addr->sa_family); + } + } else { + printf("<no address>"); + } + + printf("\n"); + ifs = ifs->ifa_next; + } + + freeifaddrs(ifs_head); + + return 0; +} diff --git a/lib/replace/tests/incoherent_mmap.c b/lib/replace/tests/incoherent_mmap.c new file mode 100644 index 0000000..ee288fd --- /dev/null +++ b/lib/replace/tests/incoherent_mmap.c @@ -0,0 +1,83 @@ +/* In OpenBSD, if you write to a file, another process doesn't see it + * in its mmap. Returns with exit status 0 if that is the case, 1 if + * it's coherent, and other if there's a problem. */ +#include <err.h> +#include <sys/mman.h> +#include <unistd.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <stdlib.h> +#include <string.h> +#include <errno.h> + +#define DATA "coherent.mmap" + +int main(int argc, char *argv[]) +{ + int tochild[2], toparent[2]; + int fd; + volatile unsigned char *map; + unsigned char *page; + const char *fname = argv[1]; + char c = 0; + + if (pipe(tochild) != 0 || pipe(toparent) != 0) + err(2, "Creating pipe"); + + if (!fname) + fname = DATA; + + fd = open(fname, O_RDWR|O_CREAT|O_TRUNC, 0600); + if (fd < 0) + err(2, "opening %s", fname); + unlink(fname); + + switch (fork()) { + case -1: + err(2, "Fork"); + case 0: + close(tochild[1]); + close(toparent[0]); + + /* Wait for parent to create file. */ + if (read(tochild[0], &c, 1) != 1) + err(2, "reading from parent"); + + /* Alter first byte. */ + pwrite(fd, &c, 1, 0); + + if (write(toparent[1], &c, 1) != 1) + err(2, "writing to parent"); + exit(0); + + default: + close(tochild[0]); + close(toparent[1]); + + /* Create a file and mmap it. */ + page = malloc(getpagesize()); + memset(page, 0x42, getpagesize()); + if (write(fd, page, getpagesize()) != getpagesize()) + err(2, "writing first page"); + map = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, + MAP_SHARED, fd, 0); + if (map == MAP_FAILED) + err(2, "mapping file"); + + if (*map != 0x42) + errx(2, "first byte isn't 0x42!"); + + /* Tell child to alter file. 
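+		   The child pwrite()s the zero byte it received over the
+		   pipe at offset 0 of the shared fd; if this process's
+		   existing mapping still reads 0x42 afterwards, write and
+		   mmap are incoherent (we errx() with status 0 below),
+		   while a coherent system falls through to exit(1).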
*/ + if (write(tochild[1], &c, 1) != 1) + err(2, "writing to child"); + + if (read(toparent[0], &c, 1) != 1) + err(2, "reading from child"); + + if (*map) + errx(0, "mmap incoherent: first byte isn't 0."); + + exit(1); + } +} diff --git a/lib/replace/tests/main.c b/lib/replace/tests/main.c new file mode 100644 index 0000000..94264d7 --- /dev/null +++ b/lib/replace/tests/main.c @@ -0,0 +1,35 @@ +/* + Unix SMB/CIFS implementation. + + libreplace tests + + Copyright (C) Jelmer Vernooij 2006 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include "replace.h" +#include "replace-testsuite.h" + +int main(void) +{ + bool ret = torture_local_replace(NULL); + if (ret) + return 0; + return -1; +} diff --git a/lib/replace/tests/os2_delete.c b/lib/replace/tests/os2_delete.c new file mode 100644 index 0000000..a3e45f7 --- /dev/null +++ b/lib/replace/tests/os2_delete.c @@ -0,0 +1,135 @@ +/* + test readdir/unlink pattern that OS/2 uses + tridge@samba.org July 2005 +*/ + +#include <stdio.h> +#include <stdlib.h> +#include <sys/stat.h> +#include <unistd.h> +#include <sys/types.h> +#include <dirent.h> +#include <errno.h> +#include <string.h> +#include <fcntl.h> +#include "replace-test.h" + +#define NUM_FILES 700 +#define READDIR_SIZE 100 +#define DELETE_SIZE 4 + +#define TESTDIR "test.dir" + +static int test_readdir_os2_delete_ret; + +#define FAILED(d) (printf("failure: readdir [\nFailed for %s - %d = %s\n]\n", d, errno, strerror(errno)), test_readdir_os2_delete_ret = 1) + +#ifndef MIN +#define MIN(a,b) ((a)<(b)?(a):(b)) +#endif + +#ifdef _WIN32 +#define mkdir(d,m) _mkdir(d) +#endif + +static void cleanup(void) +{ + /* I'm a lazy bastard */ + if (system("rm -rf " TESTDIR)) { + FAILED("system"); + } + mkdir(TESTDIR, 0700) == 0 || FAILED("mkdir"); +} + +static void create_files(void) +{ + int i; + for (i=0;i<NUM_FILES;i++) { + char fname[40]; + int fd; + snprintf(fname, sizeof(fname), TESTDIR "/test%u.txt", i); + fd = open(fname, O_CREAT|O_RDWR, 0600); + if (fd < 0) { + FAILED("open"); + } + if (close(fd) != 0) { + FAILED("close"); + } + } +} + +static int os2_delete(DIR *d) +{ + off_t offsets[READDIR_SIZE]; + int i, j; + struct dirent *de; + char names[READDIR_SIZE][256]; + + /* scan, remembering offsets */ + for (i=0, de=readdir(d); + de && i < READDIR_SIZE; + de=readdir(d), i++) { + offsets[i] = telldir(d); + /* strlcpy not available here */ + snprintf(names[i], sizeof(names[i]), "%s", de->d_name); + } + + if (i == 0) { + return 0; + } + + /* delete the first few */ + for (j=0; j<MIN(i, DELETE_SIZE); j++) { + char fname[40]; + snprintf(fname, sizeof(fname), TESTDIR "/%s", names[j]); + unlink(fname) == 0 || FAILED("unlink"); + } + + /* seek to just after the deletion */ + seekdir(d, offsets[j-1]); + + /* return number deleted */ + return j; +} + +int 
test_readdir_os2_delete(void) +{ + int total_deleted = 0; + DIR *d; + struct dirent *de; + + test_readdir_os2_delete_ret = 0; + + cleanup(); + create_files(); + + d = opendir(TESTDIR "/test0.txt"); + if (d != NULL) FAILED("opendir() on file succeeded"); + if (errno != ENOTDIR) FAILED("opendir() on file didn't give ENOTDIR"); + if (d != NULL) closedir(d); + + d = opendir(TESTDIR); + + /* skip past . and .. */ + de = readdir(d); + strcmp(de->d_name, ".") == 0 || FAILED("match ."); + de = readdir(d); + strcmp(de->d_name, "..") == 0 || FAILED("match .."); + + while (1) { + int n = os2_delete(d); + if (n == 0) break; + total_deleted += n; + } + closedir(d); + + fprintf(stderr, "Deleted %d files of %d\n", total_deleted, NUM_FILES); + + rmdir(TESTDIR) == 0 || FAILED("rmdir"); + + if (system("rm -rf " TESTDIR) == -1) { + FAILED("system"); + } + + return test_readdir_os2_delete_ret; +} diff --git a/lib/replace/tests/shared_mmap.c b/lib/replace/tests/shared_mmap.c new file mode 100644 index 0000000..9d6e3fc --- /dev/null +++ b/lib/replace/tests/shared_mmap.c @@ -0,0 +1,71 @@ +/* this tests whether we can use a shared writeable mmap on a file - + as needed for the mmap variant of FAST_SHARE_MODES */ + +#if defined(HAVE_UNISTD_H) +#include <unistd.h> +#endif +#ifdef HAVE_STDLIB_H +#include <stdlib.h> +#endif +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> + +#define DATA "conftest.mmap" + +#ifndef MAP_FILE +#define MAP_FILE 0 +#endif + +int main(void) +{ + int *buf; + int i; + int fd = open(DATA,O_RDWR|O_CREAT|O_TRUNC,0666); + int count=7; + + if (fd == -1) exit(1); + + for (i=0;i<10000;i++) { + write(fd,&i,sizeof(i)); + } + + close(fd); + + if (fork() == 0) { + fd = open(DATA,O_RDWR); + if (fd == -1) exit(1); + + buf = (int *)mmap(NULL, 10000*sizeof(int), + (PROT_READ | PROT_WRITE), + MAP_FILE | MAP_SHARED, + fd, 0); + + while (count-- && buf[9124] != 55732) sleep(1); + + if (count <= 0) exit(1); + + buf[1763] = 7268; + exit(0); + } + + fd = open(DATA,O_RDWR); + if (fd == -1) exit(1); + + buf = (int *)mmap(NULL, 10000*sizeof(int), + (PROT_READ | PROT_WRITE), + MAP_FILE | MAP_SHARED, + fd, 0); + + if (buf == (int *)-1) exit(1); + + buf[9124] = 55732; + + while (count-- && buf[1763] != 7268) sleep(1); + + unlink(DATA); + + if (count > 0) exit(0); + exit(1); +} diff --git a/lib/replace/tests/shared_mremap.c b/lib/replace/tests/shared_mremap.c new file mode 100644 index 0000000..08040e2 --- /dev/null +++ b/lib/replace/tests/shared_mremap.c @@ -0,0 +1,51 @@ +/* this tests whether we can use mremap */ + +#if defined(HAVE_UNISTD_H) +#include <unistd.h> +#endif +#ifdef HAVE_STDLIB_H +#include <stdlib.h> +#endif +#include <sys/mman.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> + +#define DATA "conftest.mmap" + +#ifndef MAP_FILE +#define MAP_FILE 0 +#endif + +#ifndef MAP_FAILED +#define MAP_FAILED (int *)-1 +#endif + +int main(void) +{ + int *buf; + int fd; + int err = 1; + + fd = open(DATA, O_RDWR|O_CREAT|O_TRUNC, 0666); + if (fd == -1) { + exit(1); + } + + buf = (int *)mmap(NULL, 0x1000, PROT_READ | PROT_WRITE, + MAP_FILE | MAP_SHARED, fd, 0); + if (buf == MAP_FAILED) { + goto done; + } + + buf = mremap(buf, 0x1000, 0x2000, MREMAP_MAYMOVE); + if (buf == MAP_FAILED) { + goto done; + } + + err = 0; +done: + close(fd); + unlink(DATA); + exit(err); +} diff --git a/lib/replace/tests/snprintf.c b/lib/replace/tests/snprintf.c new file mode 100644 index 0000000..77473f0 --- /dev/null +++ b/lib/replace/tests/snprintf.c @@ -0,0 +1,29 @@ +void foo(const 
char *format, ...) +{ + va_list ap; + int len; + char buf[20]; + long long l = 1234567890; + l *= 100; + + va_start(ap, format); + len = vsnprintf(buf, 0, format, ap); + va_end(ap); + if (len != 5) exit(1); + + va_start(ap, format); + len = vsnprintf(0, 0, format, ap); + va_end(ap); + if (len != 5) exit(2); + + if (snprintf(buf, 3, "hello") != 5 || strcmp(buf, "he") != 0) exit(3); + + if (snprintf(buf, 20, "%lld", l) != 12 || strcmp(buf, "123456789000") != 0) exit(4); + if (snprintf(buf, 20, "%zu", 123456789) != 9 || strcmp(buf, "123456789") != 0) exit(5); + if (snprintf(buf, 20, "%2\$d %1\$d", 3, 4) != 3 || strcmp(buf, "4 3") != 0) exit(6); + if (snprintf(buf, 20, "%s", 0) < 3) exit(7); + + printf("1"); + exit(0); +} +int main(void) { foo("hello"); } diff --git a/lib/replace/tests/strptime.c b/lib/replace/tests/strptime.c new file mode 100644 index 0000000..5bf03f5 --- /dev/null +++ b/lib/replace/tests/strptime.c @@ -0,0 +1,173 @@ + +#ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME + +#include <stdio.h> +#include <stdlib.h> +#include <time.h> + +#define true 1 +#define false 0 + +#ifndef __STRING +#define __STRING(x) #x +#endif + +/* make printf a no-op */ +#define printf if(0) printf + +#else /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ + +#include "replace.h" +#include "system/time.h" +#include "replace-test.h" + +#endif /* LIBREPLACE_CONFIGURE_TEST_STRPTIME */ + +int libreplace_test_strptime(void) +{ + const char *s = "20070414101546Z"; + char *ret; + struct tm t, t2; + + memset(&t, 0, sizeof(t)); + memset(&t2, 0, sizeof(t2)); + + printf("test: strptime\n"); + + ret = strptime(s, "%Y%m%d%H%M%S", &t); + if ( ret == NULL ) { + printf("failure: strptime [\n" + "returned NULL\n" + "]\n"); + return false; + } + + if ( *ret != 'Z' ) { + printf("failure: strptime [\n" + "ret doesn't point to 'Z'\n" + "]\n"); + return false; + } + + ret = strptime(s, "%Y%m%d%H%M%SZ", &t2); + if ( ret == NULL ) { + printf("failure: strptime [\n" + "returned NULL with Z\n" + "]\n"); + return false; + } + + if ( *ret != '\0' ) { + printf("failure: strptime [\n" + "ret doesn't point to '\\0'\n" + "]\n"); + return false; + } + +#define CMP_TM_ELEMENT(t1,t2,elem) \ + if (t1.elem != t2.elem) { \ + printf("failure: strptime [\n" \ + "result differs if the format string has a 'Z' at the end\n" \ + "element: %s %d != %d\n" \ + "]\n", \ + __STRING(elem), t1.elem, t2.elem); \ + return false; \ + } + + CMP_TM_ELEMENT(t,t2,tm_sec); + CMP_TM_ELEMENT(t,t2,tm_min); + CMP_TM_ELEMENT(t,t2,tm_hour); + CMP_TM_ELEMENT(t,t2,tm_mday); + CMP_TM_ELEMENT(t,t2,tm_mon); + CMP_TM_ELEMENT(t,t2,tm_year); + CMP_TM_ELEMENT(t,t2,tm_wday); + CMP_TM_ELEMENT(t,t2,tm_yday); + CMP_TM_ELEMENT(t,t2,tm_isdst); + + if (t.tm_sec != 46) { + printf("failure: strptime [\n" + "tm_sec: expected: 46, got: %d\n" + "]\n", + t.tm_sec); + return false; + } + + if (t.tm_min != 15) { + printf("failure: strptime [\n" + "tm_min: expected: 15, got: %d\n" + "]\n", + t.tm_min); + return false; + } + + if (t.tm_hour != 10) { + printf("failure: strptime [\n" + "tm_hour: expected: 10, got: %d\n" + "]\n", + t.tm_hour); + return false; + } + + if (t.tm_mday != 14) { + printf("failure: strptime [\n" + "tm_mday: expected: 14, got: %d\n" + "]\n", + t.tm_mday); + return false; + } + + if (t.tm_mon != 3) { + printf("failure: strptime [\n" + "tm_mon: expected: 3, got: %d\n" + "]\n", + t.tm_mon); + return false; + } + + if (t.tm_year != 107) { + printf("failure: strptime [\n" + "tm_year: expected: 107, got: %d\n" + "]\n", + t.tm_year); + return false; + } + + if (t.tm_wday != 6) { /* saturday 
*/ + printf("failure: strptime [\n" + "tm_wday: expected: 6, got: %d\n" + "]\n", + t.tm_wday); + return false; + } + + if (t.tm_yday != 103) { + printf("failure: strptime [\n" + "tm_yday: expected: 103, got: %d\n" + "]\n", + t.tm_yday); + return false; + } + + /* we don't test this as it depends on the host configuration + if (t.tm_isdst != 0) { + printf("failure: strptime [\n" + "tm_isdst: expected: 0, got: %d\n" + "]\n", + t.tm_isdst); + return false; + }*/ + + printf("success: strptime\n"); + + return true; +} + +#ifdef LIBREPLACE_CONFIGURE_TEST_STRPTIME +int main (void) +{ + int ret; + ret = libreplace_test_strptime(); + if (ret == false) return 1; + return 0; +} +#endif diff --git a/lib/replace/tests/testsuite.c b/lib/replace/tests/testsuite.c new file mode 100644 index 0000000..58624f1 --- /dev/null +++ b/lib/replace/tests/testsuite.c @@ -0,0 +1,1227 @@ +/* + Unix SMB/CIFS implementation. + + libreplace tests + + Copyright (C) Jelmer Vernooij 2006 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include "replace.h" +#include "replace-test.h" +#include "replace-testsuite.h" + +/* + we include all the system/ include files here so that libreplace tests + them in the build farm +*/ +#include "system/capability.h" +#include "system/dir.h" +#include "system/filesys.h" +#include "system/glob.h" +#include "system/iconv.h" +#include "system/locale.h" +#include "system/network.h" +#include "system/passwd.h" +#include "system/readline.h" +#include "system/select.h" +#include "system/shmem.h" +#include "system/syslog.h" +#include "system/terminal.h" +#include "system/time.h" +#include "system/wait.h" + +#define TESTFILE "testfile.dat" + + +/* + test ftruncate() function + */ +static int test_ftruncate(void) +{ + struct stat st; + int fd; + const int size = 1234; + printf("test: ftruncate\n"); + unlink(TESTFILE); + fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); + if (fd == -1) { + printf("failure: ftruncate [\n" + "creating '%s' failed - %s\n]\n", TESTFILE, strerror(errno)); + return false; + } + if (ftruncate(fd, size) != 0) { + printf("failure: ftruncate [\n%s\n]\n", strerror(errno)); + close(fd); + return false; + } + if (fstat(fd, &st) != 0) { + printf("failure: ftruncate [\nfstat failed - %s\n]\n", strerror(errno)); + close(fd); + return false; + } + if (st.st_size != size) { + printf("failure: ftruncate [\ngave wrong size %d - expected %d\n]\n", + (int)st.st_size, size); + close(fd); + return false; + } + unlink(TESTFILE); + printf("success: ftruncate\n"); + close(fd); + return true; +} + +/* + test strlcpy() function. 
+ see http://www.gratisoft.us/todd/papers/strlcpy.html + */ +static int test_strlcpy(void) +{ + char buf[4]; + const struct { + const char *src; + size_t result; + } tests[] = { + { "abc", 3 }, + { "abcdef", 6 }, + { "abcd", 4 }, + { "", 0 }, + { NULL, 0 } + }; + int i; + printf("test: strlcpy\n"); + for (i=0;tests[i].src;i++) { + if (strlcpy(buf, tests[i].src, sizeof(buf)) != tests[i].result) { + printf("failure: strlcpy [\ntest %d failed\n]\n", i); + return false; + } + } + printf("success: strlcpy\n"); + return true; +} + +static int test_strlcat(void) +{ + char tmp[10]; + printf("test: strlcat\n"); + strlcpy(tmp, "", sizeof(tmp)); + if (strlcat(tmp, "bla", 3) != 3) { + printf("failure: strlcat [\ninvalid return code\n]\n"); + return false; + } + if (strcmp(tmp, "bl") != 0) { + printf("failure: strlcat [\nexpected \"bl\", got \"%s\"\n]\n", + tmp); + return false; + } + + strlcpy(tmp, "da", sizeof(tmp)); + if (strlcat(tmp, "me", 4) != 4) { + printf("failure: strlcat [\ninvalid return code\n]\n"); + return false; + } + if (strcmp(tmp, "dam") != 0) { + printf("failure: strlcat [\nexpected \"dam\", got \"%s\"\n]\n", + tmp); + return false; + } + + printf("success: strlcat\n"); + return true; +} + +static int test_mktime(void) +{ + /* FIXME */ + return true; +} + +static int test_initgroups(void) +{ + /* FIXME */ + return true; +} + +static int test_memmove(void) +{ + /* FIXME */ + return true; +} + +static int test_strdup(void) +{ + char *x; + int cmp; + + printf("test: strdup\n"); + x = strdup("bla"); + + cmp = strcmp("bla", x); + if (cmp != 0) { + printf("failure: strdup [\nfailed: expected \"bla\", got \"%s\"\n]\n", + x); + free(x); + return false; + } + free(x); + printf("success: strdup\n"); + return true; +} + +static int test_setlinebuf(void) +{ + printf("test: setlinebuf\n"); + setlinebuf(stdout); + printf("success: setlinebuf\n"); + return true; +} + +static int test_vsyslog(void) +{ + /* FIXME */ + return true; +} + +static int test_timegm(void) +{ + /* FIXME */ + return true; +} + +static int test_setenv(void) +{ +#define TEST_SETENV(key, value, overwrite, result) do { \ + int _ret; \ + char *_v; \ + _ret = setenv(key, value, overwrite); \ + if (_ret != 0) { \ + printf("failure: setenv [\n" \ + "setenv(%s, %s, %d) failed\n" \ + "]\n", \ + key, value, overwrite); \ + return false; \ + } \ + _v=getenv(key); \ + if (!_v) { \ + printf("failure: setenv [\n" \ + "getenv(%s) returned NULL\n" \ + "]\n", \ + key); \ + return false; \ + } \ + if (strcmp(result, _v) != 0) { \ + printf("failure: setenv [\n" \ + "getenv(%s): '%s' != '%s'\n" \ + "]\n", \ + key, result, _v); \ + return false; \ + } \ +} while(0) + +#define TEST_UNSETENV(key) do { \ + char *_v; \ + unsetenv(key); \ + _v=getenv(key); \ + if (_v) { \ + printf("failure: setenv [\n" \ + "getenv(%s): NULL != '%s'\n" \ + "]\n", \ + SETENVTEST_KEY, _v); \ + return false; \ + } \ +} while (0) + +#define SETENVTEST_KEY "SETENVTESTKEY" +#define SETENVTEST_VAL "SETENVTESTVAL" + + printf("test: setenv\n"); + TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"1", 0, SETENVTEST_VAL"1"); + TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"2", 0, SETENVTEST_VAL"1"); + TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"3", 1, SETENVTEST_VAL"3"); + TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"4", 1, SETENVTEST_VAL"4"); + TEST_UNSETENV(SETENVTEST_KEY); + TEST_UNSETENV(SETENVTEST_KEY); + TEST_SETENV(SETENVTEST_KEY, SETENVTEST_VAL"5", 0, SETENVTEST_VAL"5"); + TEST_UNSETENV(SETENVTEST_KEY); + TEST_UNSETENV(SETENVTEST_KEY); + printf("success: setenv\n"); + return true; +} + +static int test_strndup(void) +{ + char *x; + int cmp; + + printf("test: strndup\n"); + x = strndup("bla", 0); 
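+ /* strndup(s, n) copies at most n bytes and always NUL-terminates, so strndup("bla", 0) must yield an allocated empty string, never NULL on success; the strcmp() checks below dereference the result directly and so assume the allocation succeeded. */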
+ cmp = strcmp(x, ""); + free(x); + if (cmp != 0) { + printf("failure: strndup [\ninvalid\n]\n"); + return false; + } + + x = strndup("bla", 2); + cmp = strcmp(x, "bl"); + free(x); + if (cmp != 0) { + printf("failure: strndup [\ninvalid\n]\n"); + return false; + } + +#ifdef __GNUC__ +# if __GNUC__ < 11 + /* + * This code will not compile with gcc11 -O3 anymore. + * + * error: ‘strndup’ specified bound 10 exceeds source size 4 [-Werror=stringop-overread] + * x = strndup("bla", 10); + * ^~~~~~~~~~~~~~~~~~ + */ + x = strndup("bla", 10); + cmp = strcmp(x, "bla"); + free(x); + if (cmp != 0) { + printf("failure: strndup [\ninvalid\n]\n"); + return false; + } +# endif +#endif /* __GNUC__ */ + + printf("success: strndup\n"); + return true; +} + +static int test_strnlen(void) +{ + char longlen[20] = { 0 }; + + printf("test: strnlen\n"); + if (strnlen("bla", 2) != 2) { + printf("failure: strnlen [\nunexpected length\n]\n"); + return false; + } + + if (strnlen("some text\n", 0) != 0) { + printf("failure: strnlen [\nunexpected length\n]\n"); + return false; + } + + memcpy(longlen, "some text", 10); + + if (strnlen(longlen, 20) != 9) { + printf("failure: strnlen [\nunexpected length\n]\n"); + return false; + } + + printf("success: strnlen\n"); + return true; +} + +static int test_waitpid(void) +{ + /* FIXME */ + return true; +} + +static int test_seteuid(void) +{ + /* FIXME */ + return true; +} + +static int test_setegid(void) +{ + /* FIXME */ + return true; +} + +static int test_asprintf(void) +{ + char *x = NULL; + + printf("test: asprintf\n"); + if (asprintf(&x, "%d", 9) != 1) { + printf("failure: asprintf [\ngenerate asprintf\n]\n"); + free(x); + return false; + } + if (strcmp(x, "9") != 0) { + printf("failure: asprintf [\ngenerate asprintf\n]\n"); + free(x); + return false; + } + if (asprintf(&x, "dat%s", "a") != 4) { + printf("failure: asprintf [\ngenerate asprintf\n]\n"); + free(x); + return false; + } + if (strcmp(x, "data") != 0) { + printf("failure: asprintf [\ngenerate asprintf\n]\n"); + free(x); + return false; + } + free(x); + printf("success: asprintf\n"); + return true; +} + +static int test_snprintf(void) +{ + char tmp[10]; + printf("test: snprintf\n"); + if (snprintf(tmp, 3, "foo%d", 9) != 4) { + printf("failure: snprintf [\nsnprintf return code failed\n]\n"); + return false; + } + + if (strcmp(tmp, "fo") != 0) { + printf("failure: snprintf [\nsnprintf failed\n]\n"); + return false; + } + + printf("success: snprintf\n"); + return true; +} + +static int test_vasprintf(void) +{ + /* FIXME */ + return true; +} + +static int test_vsnprintf(void) +{ + /* FIXME */ + return true; +} + +static int test_opendir(void) +{ + /* FIXME */ + return true; +} + +static int test_readdir(void) +{ + printf("test: readdir\n"); + if (test_readdir_os2_delete() != 0) { + return false; + } + printf("success: readdir\n"); + return true; +} + +static int test_telldir(void) +{ + /* FIXME */ + return true; +} + +static int test_seekdir(void) +{ + /* FIXME */ + return true; +} + +static int test_dlopen(void) +{ + /* FIXME: test dlopen, dlsym, dlclose, dlerror */ + return true; +} + + +static int test_chroot(void) +{ + /* FIXME: chroot() */ + return true; +} + +static int test_bzero(void) +{ + /* FIXME: bzero */ + return true; +} + +static int test_strerror(void) +{ + /* FIXME */ + return true; +} + +static int test_errno(void) +{ + printf("test: errno\n"); + errno = 3; + if (errno != 3) { + printf("failure: errno [\nerrno failed\n]\n"); + return false; + } + + printf("success: errno\n"); + return true; +} + +static 
int test_mkdtemp(void) +{ + /* FIXME */ + return true; +} + +static int test_mkstemp(void) +{ + /* FIXME */ + return true; +} + +static int test_pread(void) +{ + /* FIXME */ + return true; +} + +static int test_pwrite(void) +{ + /* FIXME */ + return true; +} + +static int test_inet_ntoa(void) +{ + /* FIXME */ + return true; +} + +#define TEST_STRTO_X(type,fmt,func,str,base,res,diff,rrnoo) do {\ + type _v; \ + char _s[64]; \ + char *_p = NULL;\ + char *_ep = NULL; \ + strlcpy(_s, str, sizeof(_s));\ + if (diff >= 0) { \ + _ep = &_s[diff]; \ + } \ + errno = 0; \ + _v = func(_s, &_p, base); \ + if (errno != rrnoo) { \ + printf("failure: %s [\n" \ + "\t%s\n" \ + "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \ + "\terrno: %d != %d\n" \ + "]\n", \ + __STRING(func), __location__, __STRING(func), \ + str, diff, base, res, _v, rrnoo, errno); \ + return false; \ + } else if (_v != res) { \ + printf("failure: %s [\n" \ + "\t%s\n" \ + "\t%s(\"%s\",%d,%d): " fmt " != " fmt "\n" \ + "]\n", \ + __STRING(func), __location__, __STRING(func), \ + str, diff, base, res, _v); \ + return false; \ + } else if (_p != _ep) { \ + printf("failure: %s [\n" \ + "\t%s\n" \ + "\t%s(\"%s\",%d,%d): " fmt " (=/!)= " fmt "\n" \ + "\tptr: %p - %p = %d != %d\n" \ + "]\n", \ + __STRING(func), __location__, __STRING(func), \ + str, diff, base, res, _v, _ep, _p, (int)(diff - (_ep - _p)), diff); \ + return false; \ + } \ +} while (0) + +static int test_strtoll(void) +{ + printf("test: strtoll\n"); + +#define TEST_STRTOLL(str,base,res,diff,errnoo) TEST_STRTO_X(long long int, "%lld", strtoll,str,base,res,diff,errnoo) + + TEST_STRTOLL("15", 10, 15LL, 2, 0); + TEST_STRTOLL(" 15", 10, 15LL, 4, 0); + TEST_STRTOLL("15", 0, 15LL, 2, 0); + TEST_STRTOLL(" 15 ", 0, 15LL, 3, 0); + TEST_STRTOLL("+15", 10, 15LL, 3, 0); + TEST_STRTOLL(" +15", 10, 15LL, 5, 0); + TEST_STRTOLL("+15", 0, 15LL, 3, 0); + TEST_STRTOLL(" +15 ", 0, 15LL, 4, 0); + TEST_STRTOLL("-15", 10, -15LL, 3, 0); + TEST_STRTOLL(" -15", 10, -15LL, 5, 0); + TEST_STRTOLL("-15", 0, -15LL, 3, 0); + TEST_STRTOLL(" -15 ", 0, -15LL, 4, 0); + TEST_STRTOLL("015", 10, 15LL, 3, 0); + TEST_STRTOLL(" 015", 10, 15LL, 5, 0); + TEST_STRTOLL("015", 0, 13LL, 3, 0); + TEST_STRTOLL(" 015", 0, 13LL, 5, 0); + TEST_STRTOLL("0x15", 10, 0LL, 1, 0); + TEST_STRTOLL(" 0x15", 10, 0LL, 3, 0); + TEST_STRTOLL("0x15", 0, 21LL, 4, 0); + TEST_STRTOLL(" 0x15", 0, 21LL, 6, 0); + + TEST_STRTOLL("10", 16, 16LL, 2, 0); + TEST_STRTOLL(" 10 ", 16, 16LL, 4, 0); + TEST_STRTOLL("0x10", 16, 16LL, 4, 0); + TEST_STRTOLL("0x10", 0, 16LL, 4, 0); + TEST_STRTOLL(" 0x10 ", 0, 16LL, 5, 0); + TEST_STRTOLL("+10", 16, 16LL, 3, 0); + TEST_STRTOLL(" +10 ", 16, 16LL, 5, 0); + TEST_STRTOLL("+0x10", 16, 16LL, 5, 0); + TEST_STRTOLL("+0x10", 0, 16LL, 5, 0); + TEST_STRTOLL(" +0x10 ", 0, 16LL, 6, 0); + TEST_STRTOLL("-10", 16, -16LL, 3, 0); + TEST_STRTOLL(" -10 ", 16, -16LL, 5, 0); + TEST_STRTOLL("-0x10", 16, -16LL, 5, 0); + TEST_STRTOLL("-0x10", 0, -16LL, 5, 0); + TEST_STRTOLL(" -0x10 ", 0, -16LL, 6, 0); + TEST_STRTOLL("010", 16, 16LL, 3, 0); + TEST_STRTOLL(" 010 ", 16, 16LL, 5, 0); + TEST_STRTOLL("-010", 16, -16LL, 4, 0); + + TEST_STRTOLL("11", 8, 9LL, 2, 0); + TEST_STRTOLL("011", 8, 9LL, 3, 0); + TEST_STRTOLL("011", 0, 9LL, 3, 0); + TEST_STRTOLL("-11", 8, -9LL, 3, 0); + TEST_STRTOLL("-011", 8, -9LL, 4, 0); + TEST_STRTOLL("-011", 0, -9LL, 4, 0); + + TEST_STRTOLL("011", 8, 9LL, 3, 0); + TEST_STRTOLL("011", 0, 9LL, 3, 0); + TEST_STRTOLL("-11", 8, -9LL, 3, 0); + TEST_STRTOLL("-011", 8, -9LL, 4, 0); + TEST_STRTOLL("-011", 0, -9LL, 4, 0); + + 
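/* In TEST_STRTO_X, "diff" is the expected offset of the end pointer within the input and the last argument is the expected errno, so TEST_STRTOLL("15", 10, 15LL, 2, 0) asserts, roughly: errno = 0; v = strtoll(s, &ep, 10); v == 15LL && errno == 0 && ep == s + 2. A non-numeric input such as "Text" below must leave the end pointer at the start of the string and yield 0. */ + + 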
TEST_STRTOLL("Text", 0, 0LL, 0, 0); + + TEST_STRTOLL("9223372036854775807", 10, 9223372036854775807LL, 19, 0); + TEST_STRTOLL("9223372036854775807", 0, 9223372036854775807LL, 19, 0); + TEST_STRTOLL("9223372036854775808", 0, 9223372036854775807LL, 19, ERANGE); + TEST_STRTOLL("9223372036854775808", 10, 9223372036854775807LL, 19, ERANGE); + TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LL, 18, 0); + TEST_STRTOLL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 18, 0); + TEST_STRTOLL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LL, 16, 0); + TEST_STRTOLL("0x8000000000000000", 0, 9223372036854775807LL, 18, ERANGE); + TEST_STRTOLL("0x8000000000000000", 16, 9223372036854775807LL, 18, ERANGE); + TEST_STRTOLL("80000000000000000", 16, 9223372036854775807LL, 17, ERANGE); + TEST_STRTOLL("0777777777777777777777", 0, 9223372036854775807LL, 22, 0); + TEST_STRTOLL("0777777777777777777777", 8, 9223372036854775807LL, 22, 0); + TEST_STRTOLL("777777777777777777777", 8, 9223372036854775807LL, 21, 0); + TEST_STRTOLL("01000000000000000000000", 0, 9223372036854775807LL, 23, ERANGE); + TEST_STRTOLL("01000000000000000000000", 8, 9223372036854775807LL, 23, ERANGE); + TEST_STRTOLL("1000000000000000000000", 8, 9223372036854775807LL, 22, ERANGE); + + TEST_STRTOLL("-9223372036854775808", 10, -9223372036854775807LL -1, 20, 0); + TEST_STRTOLL("-9223372036854775808", 0, -9223372036854775807LL -1, 20, 0); + TEST_STRTOLL("-9223372036854775809", 0, -9223372036854775807LL -1, 20, ERANGE); + TEST_STRTOLL("-9223372036854775809", 10, -9223372036854775807LL -1, 20, ERANGE); + TEST_STRTOLL("-0x8000000000000000", 0, -9223372036854775807LL -1, 19, 0); + TEST_STRTOLL("-0x8000000000000000", 16, -9223372036854775807LL -1, 19, 0); + TEST_STRTOLL("-8000000000000000", 16, -9223372036854775807LL -1, 17, 0); + TEST_STRTOLL("-0x8000000000000001", 0, -9223372036854775807LL -1, 19, ERANGE); + TEST_STRTOLL("-0x8000000000000001", 16, -9223372036854775807LL -1, 19, ERANGE); + TEST_STRTOLL("-80000000000000001", 16, -9223372036854775807LL -1, 18, ERANGE); + TEST_STRTOLL("-01000000000000000000000",0, -9223372036854775807LL -1, 24, 0); + TEST_STRTOLL("-01000000000000000000000",8, -9223372036854775807LL -1, 24, 0); + TEST_STRTOLL("-1000000000000000000000", 8, -9223372036854775807LL -1, 23, 0); + TEST_STRTOLL("-01000000000000000000001",0, -9223372036854775807LL -1, 24, ERANGE); + TEST_STRTOLL("-01000000000000000000001",8, -9223372036854775807LL -1, 24, ERANGE); + TEST_STRTOLL("-1000000000000000000001", 8, -9223372036854775807LL -1, 23, ERANGE); + + printf("success: strtoll\n"); + return true; +} + +static int test_strtoull(void) +{ + printf("test: strtoull\n"); + +#define TEST_STRTOULL(str,base,res,diff,errnoo) TEST_STRTO_X(long long unsigned int,"%llu",strtoull,str,base,res,diff,errnoo) + + TEST_STRTOULL("15", 10, 15LLU, 2, 0); + TEST_STRTOULL(" 15", 10, 15LLU, 4, 0); + TEST_STRTOULL("15", 0, 15LLU, 2, 0); + TEST_STRTOULL(" 15 ", 0, 15LLU, 3, 0); + TEST_STRTOULL("+15", 10, 15LLU, 3, 0); + TEST_STRTOULL(" +15", 10, 15LLU, 5, 0); + TEST_STRTOULL("+15", 0, 15LLU, 3, 0); + TEST_STRTOULL(" +15 ", 0, 15LLU, 4, 0); + TEST_STRTOULL("-15", 10, 18446744073709551601LLU, 3, 0); + TEST_STRTOULL(" -15", 10, 18446744073709551601LLU, 5, 0); + TEST_STRTOULL("-15", 0, 18446744073709551601LLU, 3, 0); + TEST_STRTOULL(" -15 ", 0, 18446744073709551601LLU, 4, 0); + TEST_STRTOULL("015", 10, 15LLU, 3, 0); + TEST_STRTOULL(" 015", 10, 15LLU, 5, 0); + TEST_STRTOULL("015", 0, 13LLU, 3, 0); + TEST_STRTOULL(" 015", 0, 13LLU, 5, 0); + TEST_STRTOULL("0x15", 10, 0LLU, 1, 0); + 
TEST_STRTOULL(" 0x15", 10, 0LLU, 3, 0); + TEST_STRTOULL("0x15", 0, 21LLU, 4, 0); + TEST_STRTOULL(" 0x15", 0, 21LLU, 6, 0); + + TEST_STRTOULL("10", 16, 16LLU, 2, 0); + TEST_STRTOULL(" 10 ", 16, 16LLU, 4, 0); + TEST_STRTOULL("0x10", 16, 16LLU, 4, 0); + TEST_STRTOULL("0x10", 0, 16LLU, 4, 0); + TEST_STRTOULL(" 0x10 ", 0, 16LLU, 5, 0); + TEST_STRTOULL("+10", 16, 16LLU, 3, 0); + TEST_STRTOULL(" +10 ", 16, 16LLU, 5, 0); + TEST_STRTOULL("+0x10", 16, 16LLU, 5, 0); + TEST_STRTOULL("+0x10", 0, 16LLU, 5, 0); + TEST_STRTOULL(" +0x10 ", 0, 16LLU, 6, 0); + TEST_STRTOULL("-10", 16, -16LLU, 3, 0); + TEST_STRTOULL(" -10 ", 16, -16LLU, 5, 0); + TEST_STRTOULL("-0x10", 16, -16LLU, 5, 0); + TEST_STRTOULL("-0x10", 0, -16LLU, 5, 0); + TEST_STRTOULL(" -0x10 ", 0, -16LLU, 6, 0); + TEST_STRTOULL("010", 16, 16LLU, 3, 0); + TEST_STRTOULL(" 010 ", 16, 16LLU, 5, 0); + TEST_STRTOULL("-010", 16, -16LLU, 4, 0); + + TEST_STRTOULL("11", 8, 9LLU, 2, 0); + TEST_STRTOULL("011", 8, 9LLU, 3, 0); + TEST_STRTOULL("011", 0, 9LLU, 3, 0); + TEST_STRTOULL("-11", 8, -9LLU, 3, 0); + TEST_STRTOULL("-011", 8, -9LLU, 4, 0); + TEST_STRTOULL("-011", 0, -9LLU, 4, 0); + + TEST_STRTOULL("011", 8, 9LLU, 3, 0); + TEST_STRTOULL("011", 0, 9LLU, 3, 0); + TEST_STRTOULL("-11", 8, -9LLU, 3, 0); + TEST_STRTOULL("-011", 8, -9LLU, 4, 0); + TEST_STRTOULL("-011", 0, -9LLU, 4, 0); + + TEST_STRTOULL("Text", 0, 0LLU, 0, 0); + + TEST_STRTOULL("9223372036854775807", 10, 9223372036854775807LLU, 19, 0); + TEST_STRTOULL("9223372036854775807", 0, 9223372036854775807LLU, 19, 0); + TEST_STRTOULL("9223372036854775808", 0, 9223372036854775808LLU, 19, 0); + TEST_STRTOULL("9223372036854775808", 10, 9223372036854775808LLU, 19, 0); + TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 0, 9223372036854775807LLU, 18, 0); + TEST_STRTOULL("0x7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 18, 0); + TEST_STRTOULL("7FFFFFFFFFFFFFFF", 16, 9223372036854775807LLU, 16, 0); + TEST_STRTOULL("0x8000000000000000", 0, 9223372036854775808LLU, 18, 0); + TEST_STRTOULL("0x8000000000000000", 16, 9223372036854775808LLU, 18, 0); + TEST_STRTOULL("8000000000000000", 16, 9223372036854775808LLU, 16, 0); + TEST_STRTOULL("0777777777777777777777", 0, 9223372036854775807LLU, 22, 0); + TEST_STRTOULL("0777777777777777777777", 8, 9223372036854775807LLU, 22, 0); + TEST_STRTOULL("777777777777777777777", 8, 9223372036854775807LLU, 21, 0); + TEST_STRTOULL("01000000000000000000000",0, 9223372036854775808LLU, 23, 0); + TEST_STRTOULL("01000000000000000000000",8, 9223372036854775808LLU, 23, 0); + TEST_STRTOULL("1000000000000000000000", 8, 9223372036854775808LLU, 22, 0); + + TEST_STRTOULL("-9223372036854775808", 10, 9223372036854775808LLU, 20, 0); + TEST_STRTOULL("-9223372036854775808", 0, 9223372036854775808LLU, 20, 0); + TEST_STRTOULL("-9223372036854775809", 0, 9223372036854775807LLU, 20, 0); + TEST_STRTOULL("-9223372036854775809", 10, 9223372036854775807LLU, 20, 0); + TEST_STRTOULL("-0x8000000000000000", 0, 9223372036854775808LLU, 19, 0); + TEST_STRTOULL("-0x8000000000000000", 16, 9223372036854775808LLU, 19, 0); + TEST_STRTOULL("-8000000000000000", 16, 9223372036854775808LLU, 17, 0); + TEST_STRTOULL("-0x8000000000000001", 0, 9223372036854775807LLU, 19, 0); + TEST_STRTOULL("-0x8000000000000001", 16, 9223372036854775807LLU, 19, 0); + TEST_STRTOULL("-8000000000000001", 16, 9223372036854775807LLU, 17, 0); + TEST_STRTOULL("-01000000000000000000000",0, 9223372036854775808LLU, 24, 0); + TEST_STRTOULL("-01000000000000000000000",8, 9223372036854775808LLU, 24, 0); + TEST_STRTOULL("-1000000000000000000000",8, 9223372036854775808LLU, 23, 0); 
+ TEST_STRTOULL("-01000000000000000000001",0, 9223372036854775807LLU, 24, 0); + TEST_STRTOULL("-01000000000000000000001",8, 9223372036854775807LLU, 24, 0); + TEST_STRTOULL("-1000000000000000000001",8, 9223372036854775807LLU, 23, 0); + + TEST_STRTOULL("18446744073709551615", 0, 18446744073709551615LLU, 20, 0); + TEST_STRTOULL("18446744073709551615", 10, 18446744073709551615LLU, 20, 0); + TEST_STRTOULL("18446744073709551616", 0, 18446744073709551615LLU, 20, ERANGE); + TEST_STRTOULL("18446744073709551616", 10, 18446744073709551615LLU, 20, ERANGE); + TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 0, 18446744073709551615LLU, 18, 0); + TEST_STRTOULL("0xFFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 18, 0); + TEST_STRTOULL("FFFFFFFFFFFFFFFF", 16, 18446744073709551615LLU, 16, 0); + TEST_STRTOULL("0x10000000000000000", 0, 18446744073709551615LLU, 19, ERANGE); + TEST_STRTOULL("0x10000000000000000", 16, 18446744073709551615LLU, 19, ERANGE); + TEST_STRTOULL("10000000000000000", 16, 18446744073709551615LLU, 17, ERANGE); + TEST_STRTOULL("01777777777777777777777",0, 18446744073709551615LLU, 23, 0); + TEST_STRTOULL("01777777777777777777777",8, 18446744073709551615LLU, 23, 0); + TEST_STRTOULL("1777777777777777777777", 8, 18446744073709551615LLU, 22, 0); + TEST_STRTOULL("02000000000000000000000",0, 18446744073709551615LLU, 23, ERANGE); + TEST_STRTOULL("02000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); + TEST_STRTOULL("2000000000000000000000", 8, 18446744073709551615LLU, 22, ERANGE); + + TEST_STRTOULL("-18446744073709551615", 0, 1LLU, 21, 0); + TEST_STRTOULL("-18446744073709551615", 10, 1LLU, 21, 0); + TEST_STRTOULL("-18446744073709551616", 0, 18446744073709551615LLU, 21, ERANGE); + TEST_STRTOULL("-18446744073709551616", 10, 18446744073709551615LLU, 21, ERANGE); + TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 0, 1LLU, 19, 0); + TEST_STRTOULL("-0xFFFFFFFFFFFFFFFF", 16, 1LLU, 19, 0); + TEST_STRTOULL("-FFFFFFFFFFFFFFFF", 16, 1LLU, 17, 0); + TEST_STRTOULL("-0x10000000000000000", 0, 18446744073709551615LLU, 20, ERANGE); + TEST_STRTOULL("-0x10000000000000000", 16, 18446744073709551615LLU, 20, ERANGE); + TEST_STRTOULL("-10000000000000000", 16, 18446744073709551615LLU, 18, ERANGE); + TEST_STRTOULL("-01777777777777777777777",0, 1LLU, 24, 0); + TEST_STRTOULL("-01777777777777777777777",8, 1LLU, 24, 0); + TEST_STRTOULL("-1777777777777777777777",8, 1LLU, 23, 0); + TEST_STRTOULL("-02000000000000000000000",0, 18446744073709551615LLU, 24, ERANGE); + TEST_STRTOULL("-02000000000000000000000",8, 18446744073709551615LLU, 24, ERANGE); + TEST_STRTOULL("-2000000000000000000000",8, 18446744073709551615LLU, 23, ERANGE); + + printf("success: strtoull\n"); + return true; +} + +/* +FIXME: +Types: +bool +socklen_t +uint{8,16,32,64}_t +int{8,16,32,64}_t +intptr_t + +Constants: +PATH_NAME_MAX +UINT{16,32,64}_MAX +INT32_MAX +*/ + +static int test_va_copy(void) +{ + /* FIXME */ + return true; +} + +static int test_FUNCTION(void) +{ + printf("test: FUNCTION\n"); + if (strcmp(__FUNCTION__, "test_FUNCTION") != 0) { + printf("failure: FUNCTION [\nFUNCTION invalid\n]\n"); + return false; + } + printf("success: FUNCTION\n"); + return true; +} + +static int test_MIN(void) +{ + printf("test: MIN\n"); + if (MIN(20, 1) != 1) { + printf("failure: MIN [\nMIN invalid\n]\n"); + return false; + } + if (MIN(1, 20) != 1) { + printf("failure: MIN [\nMIN invalid\n]\n"); + return false; + } + printf("success: MIN\n"); + return true; +} + +static int test_MAX(void) +{ + printf("test: MAX\n"); + if (MAX(20, 1) != 20) { + printf("failure: MAX [\nMAX invalid\n]\n"); 
+ return false; + } + if (MAX(1, 20) != 20) { + printf("failure: MAX [\nMAX invalid\n]\n"); + return false; + } + printf("success: MAX\n"); + return true; +} + +static int test_socketpair(void) +{ + int sock[2]; + char buf[20]; + + printf("test: socketpair\n"); + + if (socketpair(AF_UNIX, SOCK_STREAM, 0, sock) == -1) { + printf("failure: socketpair [\n" + "socketpair() failed\n" + "]\n"); + return false; + } + + if (write(sock[1], "automatisch", 12) != 12) { + printf("failure: socketpair [\n" + "write() failed: %s\n" + "]\n", strerror(errno)); + return false; + } + + if (read(sock[0], buf, 12) != 12) { + printf("failure: socketpair [\n" + "read() failed: %s\n" + "]\n", strerror(errno)); + return false; + } + + if (strcmp(buf, "automatisch") != 0) { + printf("failure: socketpair [\n" + "expected: automatisch, got: %s\n" + "]\n", buf); + return false; + } + + printf("success: socketpair\n"); + + return true; +} + +extern int libreplace_test_strptime(void); + +static int test_strptime(void) +{ + return libreplace_test_strptime(); +} + +extern int getifaddrs_test(void); + +static int test_getifaddrs(void) +{ + + printf("test: getifaddrs\n"); + + if (getifaddrs_test() != 0) { + printf("failure: getifaddrs\n"); + return false; + } + + printf("success: getifaddrs\n"); + return true; +} + +static int test_utime(void) +{ + struct utimbuf u; + struct stat st1, st2, st3; + int fd; + + printf("test: utime\n"); + unlink(TESTFILE); + + fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); + if (fd == -1) { + printf("failure: utime [\n" + "creating '%s' failed - %s\n]\n", + TESTFILE, strerror(errno)); + return false; + } + + if (fstat(fd, &st1) != 0) { + printf("failure: utime [\n" + "fstat (1) failed - %s\n]\n", + strerror(errno)); + close(fd); + return false; + } + + u.actime = st1.st_atime + 300; + u.modtime = st1.st_mtime - 300; + if (utime(TESTFILE, &u) != 0) { + printf("failure: utime [\n" + "utime(&u) failed - %s\n]\n", + strerror(errno)); + close(fd); + return false; + } + + if (fstat(fd, &st2) != 0) { + printf("failure: utime [\n" + "fstat (2) failed - %s\n]\n", + strerror(errno)); + close(fd); + return false; + } + + if (utime(TESTFILE, NULL) != 0) { + printf("failure: utime [\n" + "utime(NULL) failed - %s\n]\n", + strerror(errno)); + close(fd); + return false; + } + + if (fstat(fd, &st3) != 0) { + printf("failure: utime [\n" + "fstat (3) failed - %s\n]\n", + strerror(errno)); + close(fd); + return false; + } + +#define CMP_VAL(a,c,b) do { \ + if (a c b) { \ + printf("failure: utime [\n" \ + "%s: %s(%d) %s %s(%d)\n]\n", \ + __location__, \ + #a, (int)a, #c, #b, (int)b); \ + close(fd); \ + return false; \ + } \ +} while(0) +#define EQUAL_VAL(a,b) CMP_VAL(a,!=,b) +#define GREATER_VAL(a,b) CMP_VAL(a,<=,b) +#define LESSER_VAL(a,b) CMP_VAL(a,>=,b) + + EQUAL_VAL(st2.st_atime, st1.st_atime + 300); + EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); + LESSER_VAL(st3.st_atime, st2.st_atime); + GREATER_VAL(st3.st_mtime, st2.st_mtime); + +#undef CMP_VAL +#undef EQUAL_VAL +#undef GREATER_VAL +#undef LESSER_VAL + + unlink(TESTFILE); + printf("success: utime\n"); + close(fd); + return true; +} + +static int test_utimes(void) +{ + struct timeval tv[2]; + struct stat st1, st2; + int fd; + + printf("test: utimes\n"); + unlink(TESTFILE); + + fd = open(TESTFILE, O_RDWR|O_CREAT, 0600); + if (fd == -1) { + printf("failure: utimes [\n" + "creating '%s' failed - %s\n]\n", + TESTFILE, strerror(errno)); + return false; + } + + if (fstat(fd, &st1) != 0) { + printf("failure: utimes [\n" + "fstat (1) failed - %s\n]\n", + 
strerror(errno)); + close(fd); + return false; + } + + ZERO_STRUCT(tv); + tv[0].tv_sec = st1.st_atime + 300; + tv[1].tv_sec = st1.st_mtime - 300; + if (utimes(TESTFILE, tv) != 0) { + printf("failure: utimes [\n" + "utimes(tv) failed - %s\n]\n", + strerror(errno)); + close(fd); + return false; + } + + if (fstat(fd, &st2) != 0) { + printf("failure: utimes [\n" + "fstat (2) failed - %s\n]\n", + strerror(errno)); + close(fd); + return false; + } + +#define EQUAL_VAL(a,b) do { \ + if (a != b) { \ + printf("failure: utimes [\n" \ + "%s: %s(%d) != %s(%d)\n]\n", \ + __location__, \ + #a, (int)a, #b, (int)b); \ + close(fd); \ + return false; \ + } \ +} while(0) + + EQUAL_VAL(st2.st_atime, st1.st_atime + 300); + EQUAL_VAL(st2.st_mtime, st1.st_mtime - 300); + +#undef EQUAL_VAL + + unlink(TESTFILE); + printf("success: utimes\n"); + close(fd); + return true; +} + +static int test_memmem(void) +{ + char *s; + + printf("test: memmem\n"); + + s = (char *)memmem("foo", 3, "fo", 2); + if (strcmp(s, "foo") != 0) { + printf(__location__ ": Failed memmem\n"); + return false; + } + + s = (char *)memmem("foo", 3, "", 0); + /* it is allowable for this to return NULL (as happens on + FreeBSD) */ + if (s && strcmp(s, "foo") != 0) { + printf(__location__ ": Failed memmem\n"); + return false; + } + + s = (char *)memmem("foo", 4, "o", 1); + if (strcmp(s, "oo") != 0) { + printf(__location__ ": Failed memmem\n"); + return false; + } + + s = (char *)memmem("foobarfodx", 11, "fod", 3); + if (strcmp(s, "fodx") != 0) { + printf(__location__ ": Failed memmem\n"); + return false; + } + + printf("success: memmem\n"); + + return true; +} + +static bool test_closefrom(void) +{ + int i, fd; + + for (i=0; i<100; i++) { + fd = dup(0); + if (fd == -1) { + perror("dup failed"); + closefrom(3); + return false; + } + + /* 1000 is just an arbitrarily chosen upper bound */ + + if (fd >= 1000) { + printf("fd=%d\n", fd); + closefrom(3); + return false; + } + } + + closefrom(3); + + for (i=3; i<=fd; i++) { + off_t off; + off = lseek(i, 0, SEEK_CUR); + if ((off != (off_t)-1) || (errno != EBADF)) { + printf("fd %d not closed\n", i); + return false; + } + } + + return true; +} + +static bool test_array_del_element(void) +{ + int a[] = { 1,2,3,4,5 }; + + printf("test: array_del_element\n"); + + ARRAY_DEL_ELEMENT(a, 4, ARRAY_SIZE(a)); + + if ((a[0] != 1) || + (a[1] != 2) || + (a[2] != 3) || + (a[3] != 4) || + (a[4] != 5)) { + return false; + } + + ARRAY_DEL_ELEMENT(a, 0, ARRAY_SIZE(a)); + + if ((a[0] != 2) || + (a[1] != 3) || + (a[2] != 4) || + (a[3] != 5) || + (a[4] != 5)) { + return false; + } + + ARRAY_DEL_ELEMENT(a, 2, ARRAY_SIZE(a)); + + if ((a[0] != 2) || + (a[1] != 3) || + (a[2] != 5) || + (a[3] != 5) || + (a[4] != 5)) { + return false; + } + + printf("success: array_del_element\n"); + + return true; +} + +bool torture_local_replace(struct torture_context *ctx) +{ + bool ret = true; + ret &= test_ftruncate(); + ret &= test_strlcpy(); + ret &= test_strlcat(); + ret &= test_mktime(); + ret &= test_initgroups(); + ret &= test_memmove(); + ret &= test_strdup(); + ret &= test_setlinebuf(); + ret &= test_vsyslog(); + ret &= test_timegm(); + ret &= test_setenv(); + ret &= test_strndup(); + ret &= test_strnlen(); + ret &= test_waitpid(); + ret &= test_seteuid(); + ret &= test_setegid(); + ret &= test_asprintf(); + ret &= test_snprintf(); + ret &= test_vasprintf(); + ret &= test_vsnprintf(); + ret &= test_opendir(); + ret &= test_readdir(); + ret &= test_telldir(); + ret &= test_seekdir(); + ret &= test_dlopen(); + ret &= test_chroot(); + ret &= 
test_bzero(); + ret &= test_strerror(); + ret &= test_errno(); + ret &= test_mkdtemp(); + ret &= test_mkstemp(); + ret &= test_pread(); + ret &= test_pwrite(); + ret &= test_inet_ntoa(); + ret &= test_strtoll(); + ret &= test_strtoull(); + ret &= test_va_copy(); + ret &= test_FUNCTION(); + ret &= test_MIN(); + ret &= test_MAX(); + ret &= test_socketpair(); + ret &= test_strptime(); + ret &= test_getifaddrs(); + ret &= test_utime(); + ret &= test_utimes(); + ret &= test_memmem(); + ret &= test_closefrom(); + ret &= test_array_del_element(); + + return ret; +} diff --git a/lib/replace/timegm.c b/lib/replace/timegm.c new file mode 100644 index 0000000..93263a2 --- /dev/null +++ b/lib/replace/timegm.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 1997 Kungliga Tekniska Högskolan + * (Royal Institute of Technology, Stockholm, Sweden). + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * 3. Neither the name of the Institute nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE INSTITUTE AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE INSTITUTE OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +/* + adapted for Samba4 by Andrew Tridgell +*/ + +#include "replace.h" +#include "system/time.h" + +static int is_leap(unsigned y) +{ + y += 1900; + return (y % 4) == 0 && ((y % 100) != 0 || (y % 400) == 0); +} + +time_t rep_timegm(struct tm *tm) +{ + static const unsigned ndays[2][12] ={ + {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}, + {31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31}}; + time_t res = 0; + unsigned i; + + if (tm->tm_mon > 12 || + tm->tm_mon < 0 || + tm->tm_mday > 31 || + tm->tm_min > 60 || + tm->tm_sec > 60 || + tm->tm_hour > 24) { + /* invalid tm structure */ + return 0; + } + + for (i = 70; i < tm->tm_year; ++i) + res += is_leap(i) ? 
366 : 365; + + for (i = 0; i < tm->tm_mon; ++i) + res += ndays[is_leap(tm->tm_year)][i]; + res += tm->tm_mday - 1; + res *= 24; + res += tm->tm_hour; + res *= 60; + res += tm->tm_min; + res *= 60; + res += tm->tm_sec; + return res; +} diff --git a/lib/replace/win32_replace.h b/lib/replace/win32_replace.h new file mode 100644 index 0000000..9901e72 --- /dev/null +++ b/lib/replace/win32_replace.h @@ -0,0 +1,159 @@ +#ifndef _WIN32_REPLACE_H +#define _WIN32_REPLACE_H + +#ifdef HAVE_WINSOCK2_H +#include <winsock2.h> +#endif + +#ifdef HAVE_WS2TCPIP_H +#include <ws2tcpip.h> +#endif + +#ifdef HAVE_WINDOWS_H +#include <windows.h> +#endif + +/* Map BSD Socket errorcodes to the WSA errorcodes (if possible) */ + +#define EAFNOSUPPORT WSAEAFNOSUPPORT +#define ECONNREFUSED WSAECONNREFUSED +#define EINPROGRESS WSAEINPROGRESS +#define EMSGSIZE WSAEMSGSIZE +#define ENOBUFS WSAENOBUFS +#define ENOTSOCK WSAENOTSOCK +#define ENETUNREACH WSAENETUNREACH +#define ENOPROTOOPT WSAENOPROTOOPT +#define ENOTCONN WSAENOTCONN +#define ENOTSUP 134 + +/* We undefine the following constants due to conflicts with the w32api headers + * and the Windows Platform SDK/DDK. + */ + +#undef interface + +#undef ERROR_INVALID_PARAMETER +#undef ERROR_INSUFFICIENT_BUFFER +#undef ERROR_INVALID_DATATYPE + +#undef FILE_GENERIC_READ +#undef FILE_GENERIC_WRITE +#undef FILE_GENERIC_EXECUTE +#undef FILE_ATTRIBUTE_READONLY +#undef FILE_ATTRIBUTE_HIDDEN +#undef FILE_ATTRIBUTE_SYSTEM +#undef FILE_ATTRIBUTE_DIRECTORY +#undef FILE_ATTRIBUTE_ARCHIVE +#undef FILE_ATTRIBUTE_DEVICE +#undef FILE_ATTRIBUTE_NORMAL +#undef FILE_ATTRIBUTE_TEMPORARY +#undef FILE_ATTRIBUTE_REPARSE_POINT +#undef FILE_ATTRIBUTE_COMPRESSED +#undef FILE_ATTRIBUTE_OFFLINE +#undef FILE_ATTRIBUTE_ENCRYPTED +#undef FILE_FLAG_WRITE_THROUGH +#undef FILE_FLAG_NO_BUFFERING +#undef FILE_FLAG_RANDOM_ACCESS +#undef FILE_FLAG_SEQUENTIAL_SCAN +#undef FILE_FLAG_DELETE_ON_CLOSE +#undef FILE_FLAG_BACKUP_SEMANTICS +#undef FILE_FLAG_POSIX_SEMANTICS +#undef FILE_TYPE_DISK +#undef FILE_TYPE_UNKNOWN +#undef FILE_CASE_SENSITIVE_SEARCH +#undef FILE_CASE_PRESERVED_NAMES +#undef FILE_UNICODE_ON_DISK +#undef FILE_PERSISTENT_ACLS +#undef FILE_FILE_COMPRESSION +#undef FILE_VOLUME_QUOTAS +#undef FILE_VOLUME_IS_COMPRESSED +#undef FILE_NOTIFY_CHANGE_FILE_NAME +#undef FILE_NOTIFY_CHANGE_DIR_NAME +#undef FILE_NOTIFY_CHANGE_ATTRIBUTES +#undef FILE_NOTIFY_CHANGE_SIZE +#undef FILE_NOTIFY_CHANGE_LAST_WRITE +#undef FILE_NOTIFY_CHANGE_LAST_ACCESS +#undef FILE_NOTIFY_CHANGE_CREATION +#undef FILE_NOTIFY_CHANGE_EA +#undef FILE_NOTIFY_CHANGE_SECURITY +#undef FILE_NOTIFY_CHANGE_STREAM_NAME +#undef FILE_NOTIFY_CHANGE_STREAM_SIZE +#undef FILE_NOTIFY_CHANGE_STREAM_WRITE +#undef FILE_NOTIFY_CHANGE_NAME + +#undef PRINTER_ATTRIBUTE_QUEUED +#undef PRINTER_ATTRIBUTE_DIRECT +#undef PRINTER_ATTRIBUTE_DEFAULT +#undef PRINTER_ATTRIBUTE_SHARED +#undef PRINTER_ATTRIBUTE_NETWORK +#undef PRINTER_ATTRIBUTE_HIDDEN +#undef PRINTER_ATTRIBUTE_LOCAL +#undef PRINTER_ATTRIBUTE_ENABLE_DEVQ +#undef PRINTER_ATTRIBUTE_KEEPPRINTEDJOBS +#undef PRINTER_ATTRIBUTE_DO_COMPLETE_FIRST +#undef PRINTER_ATTRIBUTE_WORK_OFFLINE +#undef PRINTER_ATTRIBUTE_ENABLE_BIDI +#undef PRINTER_ATTRIBUTE_RAW_ONLY +#undef PRINTER_ATTRIBUTE_PUBLISHED +#undef PRINTER_ENUM_DEFAULT +#undef PRINTER_ENUM_LOCAL +#undef PRINTER_ENUM_CONNECTIONS +#undef PRINTER_ENUM_FAVORITE +#undef PRINTER_ENUM_NAME +#undef PRINTER_ENUM_REMOTE +#undef PRINTER_ENUM_SHARED +#undef PRINTER_ENUM_NETWORK +#undef PRINTER_ENUM_EXPAND +#undef PRINTER_ENUM_CONTAINER +#undef PRINTER_ENUM_ICON1 +#undef 
PRINTER_ENUM_ICON2 +#undef PRINTER_ENUM_ICON3 +#undef PRINTER_ENUM_ICON4 +#undef PRINTER_ENUM_ICON5 +#undef PRINTER_ENUM_ICON6 +#undef PRINTER_ENUM_ICON7 +#undef PRINTER_ENUM_ICON8 +#undef PRINTER_STATUS_PAUSED +#undef PRINTER_STATUS_ERROR +#undef PRINTER_STATUS_PENDING_DELETION +#undef PRINTER_STATUS_PAPER_JAM +#undef PRINTER_STATUS_PAPER_OUT +#undef PRINTER_STATUS_MANUAL_FEED +#undef PRINTER_STATUS_PAPER_PROBLEM +#undef PRINTER_STATUS_OFFLINE +#undef PRINTER_STATUS_IO_ACTIVE +#undef PRINTER_STATUS_BUSY +#undef PRINTER_STATUS_PRINTING +#undef PRINTER_STATUS_OUTPUT_BIN_FULL +#undef PRINTER_STATUS_NOT_AVAILABLE +#undef PRINTER_STATUS_WAITING +#undef PRINTER_STATUS_PROCESSING +#undef PRINTER_STATUS_INITIALIZING +#undef PRINTER_STATUS_WARMING_UP +#undef PRINTER_STATUS_TONER_LOW +#undef PRINTER_STATUS_NO_TONER +#undef PRINTER_STATUS_PAGE_PUNT +#undef PRINTER_STATUS_USER_INTERVENTION +#undef PRINTER_STATUS_OUT_OF_MEMORY +#undef PRINTER_STATUS_DOOR_OPEN +#undef PRINTER_STATUS_SERVER_UNKNOWN +#undef PRINTER_STATUS_POWER_SAVE + +#undef DWORD +#undef HKEY_CLASSES_ROOT +#undef HKEY_CURRENT_USER +#undef HKEY_LOCAL_MACHINE +#undef HKEY_USERS +#undef HKEY_PERFORMANCE_DATA +#undef HKEY_CURRENT_CONFIG +#undef HKEY_DYN_DATA +#undef REG_DWORD +#undef REG_QWORD + +#undef SERVICE_STATE_ALL + +#undef SE_GROUP_MANDATORY +#undef SE_GROUP_ENABLED_BY_DEFAULT +#undef SE_GROUP_ENABLED + +#endif /* _WIN32_REPLACE_H */ diff --git a/lib/replace/wscript b/lib/replace/wscript new file mode 100644 index 0000000..77e655b --- /dev/null +++ b/lib/replace/wscript @@ -0,0 +1,1010 @@ +#!/usr/bin/env python + +APPNAME = 'libreplace' +VERSION = '1.2.1' + +import sys +import os + +# find the buildtools directory +top = '.' +while not os.path.exists(top+'/buildtools') and len(top.split('/')) < 5: + top = top + '/..' 
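+# The loop above climbs at most four path components, so a standalone libreplace build started in lib/replace of a full Samba checkout can still locate the shared wafsamba build tools.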
+sys.path.insert(0, top + '/buildtools/wafsamba') + +out = 'bin' + +import wafsamba +from wafsamba import samba_dist +from waflib import Options, Utils, Logs, Context + +samba_dist.DIST_DIRS('lib/replace buildtools:buildtools third_party/waf:third_party/waf') + +def options(opt): + opt.BUILTIN_DEFAULT('NONE') + opt.PRIVATE_EXTENSION_DEFAULT('') + opt.RECURSE('buildtools/wafsamba') + +@Utils.run_once +def configure(conf): + conf.RECURSE('buildtools/wafsamba') + + conf.env.standalone_replace = conf.IN_LAUNCH_DIR() + + if sys.platform.rfind('linux') > -1: + conf.DEFINE('LINUX', '1') + + conf.DEFINE('BOOL_DEFINED', 1) + conf.DEFINE('HAVE_LIBREPLACE', 1) + conf.DEFINE('LIBREPLACE_NETWORK_CHECKS', 1) + + conf.CHECK_HEADERS('linux/types.h crypt.h locale.h acl/libacl.h compat.h') + conf.CHECK_HEADERS('acl/libacl.h attr/xattr.h compat.h ctype.h dustat.h') + conf.CHECK_HEADERS('fcntl.h fnmatch.h glob.h history.h krb5.h langinfo.h') + conf.CHECK_HEADERS('locale.h ndir.h pwd.h') + conf.CHECK_HEADERS('shadow.h sys/acl.h') + conf.CHECK_HEADERS('sys/attributes.h attr/attributes.h sys/capability.h sys/dir.h sys/epoll.h') + conf.CHECK_HEADERS('sys/fcntl.h sys/filio.h sys/filsys.h sys/fs/s5param.h') + conf.CHECK_HEADERS('sys/id.h sys/ioctl.h sys/ipc.h sys/mman.h sys/mode.h sys/ndir.h sys/priv.h') + conf.CHECK_HEADERS('sys/resource.h sys/security.h sys/shm.h sys/statfs.h sys/statvfs.h sys/termio.h') + conf.CHECK_HEADERS('sys/vfs.h sys/xattr.h termio.h termios.h sys/file.h') + conf.CHECK_HEADERS('sys/ucontext.h sys/wait.h sys/stat.h') + + if not conf.CHECK_DECLS('malloc', headers='stdlib.h'): + conf.CHECK_HEADERS('malloc.h') + + conf.CHECK_HEADERS('grp.h') + conf.CHECK_HEADERS('sys/select.h setjmp.h utime.h sys/syslog.h syslog.h') + conf.CHECK_HEADERS('stdarg.h vararg.h sys/mount.h mntent.h') + conf.CHECK_HEADERS('stropts.h unix.h string.h strings.h sys/param.h limits.h') + conf.CHECK_HEADERS('''sys/socket.h netinet/in.h netdb.h arpa/inet.h netinet/in_systm.h + netinet/ip.h netinet/tcp.h netinet/in_ip.h + sys/sockio.h sys/un.h''', together=True) + conf.CHECK_HEADERS('sys/uio.h ifaddrs.h direct.h dirent.h') + conf.CHECK_HEADERS('windows.h winsock2.h ws2tcpip.h') + conf.CHECK_HEADERS('errno.h') + conf.CHECK_HEADERS('getopt.h iconv.h') + conf.CHECK_HEADERS('memory.h nss.h sasl/sasl.h') + conf.CHECK_HEADERS('linux/openat2.h') + + conf.CHECK_FUNCS_IN('inotify_init', 'inotify', checklibc=True, + headers='sys/inotify.h') + + conf.CHECK_HEADERS('security/pam_appl.h zlib.h asm/unistd.h') + conf.CHECK_HEADERS('sys/unistd.h alloca.h float.h') + + conf.SET_TARGET_TYPE('tirpc', 'EMPTY') + + if conf.CHECK_CODE( + '\n#ifndef _TIRPC_RPC_H\n#error "no tirpc headers in system path"\n#endif\n', + 'HAVE_RPC_RPC_HEADERS', + headers=['rpc/rpc.h', 'rpc/nettype.h'], + msg='Checking for tirpc rpc headers in default system path'): + if conf.CONFIG_SET('HAVE_RPC_RPC_H'): + conf.undefine('HAVE_RPC_RPC_H') + + if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): + if conf.CHECK_CFG(package='libtirpc', args='--cflags --libs', + msg='Checking for libtirpc headers', + uselib_store='TIRPC'): + conf.CHECK_HEADERS('rpc/rpc.h rpc/nettype.h', lib='tirpc', together=True) + conf.SET_TARGET_TYPE('tirpc', 'SYSLIB') + if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): + if conf.CHECK_CFG(package='libntirpc', args='--cflags', + msg='Checking for libntirpc headers', + uselib_store='TIRPC'): + conf.CHECK_HEADERS('rpc/rpc.h rpc/nettype.h', lib='tirpc', together=True) + conf.SET_TARGET_TYPE('tirpc', 'SYSLIB') + if not conf.CONFIG_SET('HAVE_RPC_RPC_H'): + Logs.warn('No 
rpc/rpc.h header found, tirpc or libntirpc missing?') + + # This file is deprecated with glibc >= 2.30 so we need to check if it + # includes a deprecation warning: + # #warning "The <sys/sysctl.h> header is deprecated and will be removed." + conf.CHECK_CODE(''' + #include <sys/sysctl.h> + int main(void) { return 0; } + ''', + define='HAVE_SYS_SYSCTL_H', + cflags=['-Werror=cpp'], + addmain=False, + msg='Checking for header sys/sysctl.h') + + conf.CHECK_HEADERS('sys/fileio.h sys/filesys.h sys/dustat.h sys/sysmacros.h') + conf.CHECK_HEADERS('xfs/libxfs.h netgroup.h') + + conf.CHECK_HEADERS('valgrind.h valgrind/valgrind.h') + conf.CHECK_HEADERS('valgrind/memcheck.h valgrind/helgrind.h valgrind/callgrind.h') + conf.CHECK_HEADERS('nss_common.h nsswitch.h ns_api.h') + conf.CHECK_HEADERS('sys/extattr.h sys/ea.h sys/proplist.h sys/cdefs.h') + conf.CHECK_HEADERS('utmp.h utmpx.h lastlog.h') + conf.CHECK_HEADERS('syscall.h sys/syscall.h inttypes.h') + conf.CHECK_HEADERS('sys/atomic.h stdatomic.h') + conf.CHECK_HEADERS('libgen.h') + + if conf.CHECK_CFLAGS('-Wno-format-truncation'): + conf.define('HAVE_WNO_FORMAT_TRUNCATION', '1') + + if conf.CHECK_CFLAGS('-Wno-unused-function'): + conf.define('HAVE_WNO_UNUSED_FUNCTION', '1') + + if conf.CHECK_CFLAGS('-Wno-strict-overflow'): + conf.define('HAVE_WNO_STRICT_OVERFLOW', '1') + + if conf.CHECK_CFLAGS('-Wno-unused-but-set-variable'): + conf.define('HAVE_WNO_UNUSED_BUT_SET_VARIABLE', '1') + + if conf.CHECK_CFLAGS('-Wuse-after-free=1'): + conf.define('HAVE_WUSE_AFTER_FREE_1', '1') + + # Check for process set name support + conf.CHECK_CODE(''' + #include <sys/prctl.h> + int main(void) { + prctl(0); + return 0; + } + ''', + 'HAVE_PRCTL', + addmain=False, + headers='sys/prctl.h', + msg='Checking for prctl syscall') + + conf.CHECK_CODE(''' + #include <unistd.h> + #ifdef HAVE_FCNTL_H + #include <fcntl.h> + #endif + int main(void) { int fd = open("/dev/null", O_DIRECT); } + ''', + define='HAVE_OPEN_O_DIRECT', + addmain=False, + msg='Checking for O_DIRECT flag to open(2)') + + conf.CHECK_TYPES('"long long" intptr_t uintptr_t ptrdiff_t comparison_fn_t') + if not conf.CHECK_TYPE('bool', define='HAVE_BOOL', headers='stdbool.h'): + raise Errors.WafError('Samba requires a genuine boolean type') + + conf.CHECK_TYPE('int8_t', 'char') + conf.CHECK_TYPE('uint8_t', 'unsigned char') + conf.CHECK_TYPE('int16_t', 'short') + conf.CHECK_TYPE('uint16_t', 'unsigned short') + conf.CHECK_TYPE('int32_t', 'int') + conf.CHECK_TYPE('uint32_t', 'unsigned') + conf.CHECK_TYPE('int64_t', 'long long') + conf.CHECK_TYPE('uint64_t', 'unsigned long long') + conf.CHECK_TYPE('size_t', 'unsigned int') + conf.CHECK_TYPE('ssize_t', 'int') + conf.CHECK_TYPE('ino_t', 'unsigned') + conf.CHECK_TYPE('loff_t', 'off_t') + conf.CHECK_TYPE('offset_t', 'loff_t') + conf.CHECK_TYPE('volatile int', define='HAVE_VOLATILE') + conf.CHECK_TYPE('uint_t', 'unsigned int') + conf.CHECK_TYPE('blksize_t', 'long', headers='sys/types.h sys/stat.h unistd.h') + conf.CHECK_TYPE('blkcnt_t', 'long', headers='sys/types.h sys/stat.h unistd.h') + + conf.CHECK_SIZEOF('bool char int "long long" long short size_t ssize_t') + sizeof_int = conf.env["SIZEOF_INT"] + if sizeof_int < 4: + conf.fatal(f"Samba won't work with int of size {sizeof_int} (requires >= 4)") + + conf.CHECK_SIZEOF('int8_t uint8_t int16_t uint16_t int32_t uint32_t int64_t uint64_t') + conf.CHECK_SIZEOF('void*', define='SIZEOF_VOID_P') + conf.CHECK_SIZEOF('off_t dev_t ino_t time_t') + + conf.CHECK_TYPES('socklen_t', headers='sys/socket.h') + conf.CHECK_TYPE_IN('struct 
ifaddrs', 'ifaddrs.h') + conf.CHECK_TYPE_IN('struct addrinfo', 'netdb.h') + conf.CHECK_TYPE_IN('struct sockaddr', 'sys/socket.h') + conf.CHECK_CODE('struct sockaddr_in6 x', define='HAVE_STRUCT_SOCKADDR_IN6', + headers='sys/socket.h netdb.h netinet/in.h') + conf.CHECK_TYPE_IN('struct sockaddr_storage', 'sys/socket.h') + conf.CHECK_TYPE_IN('sa_family_t', 'sys/socket.h') + + conf.CHECK_TYPE_IN('sig_atomic_t', 'signal.h', define='HAVE_SIG_ATOMIC_T_TYPE') + conf.CHECK_FUNCS('sigsetmask siggetmask sigprocmask sigblock sigaction sigset') + + # Those functions are normally available in libc + if not conf.CHECK_FUNCS(''' + inet_ntoa + inet_aton + inet_ntop + inet_pton + connect + gethostbyname + getaddrinfo + getnameinfo + freeaddrinfo + gai_strerror + socketpair''', + headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h'): + conf.CHECK_FUNCS_IN(''' + inet_ntoa + inet_aton + inet_ntop + inet_pton + connect + gethostbyname + getaddrinfo + getnameinfo + freeaddrinfo + gai_strerror + socketpair''', + 'socket nsl', + headers='sys/socket.h netinet/in.h arpa/inet.h netdb.h') + conf.DEFINE('REPLACE_REQUIRES_LIBSOCKET_LIBNSL', 1) + + conf.CHECK_FUNCS('memset_s memset_explicit') + + conf.CHECK_CODE(''' + #include <string.h> + + int main(void) + { + char buf[] = "This is some content"; + memset(buf, '\0', sizeof(buf)); __asm__ volatile("" : : "g"(&buf) : "memory"); + return 0; + } + ''', + define='HAVE_GCC_VOLATILE_MEMORY_PROTECTION', + addmain=False, + msg='Checking for volatile memory protection', + local_include=False) + + # Some old Linux systems have broken header files and + # miss the IPV6_V6ONLY define in netinet/in.h, + # but have it in linux/in6.h. + # We can't include both files so we just check if the value + # if defined and do the replacement in system/network.h + if not conf.CHECK_VARIABLE('IPV6_V6ONLY', + headers='sys/socket.h netdb.h netinet/in.h'): + conf.CHECK_CODE(''' + #include <linux/in6.h> + #if (IPV6_V6ONLY != 26) + #error no IPV6_V6ONLY support on linux + #endif + int main(void) { return IPV6_V6ONLY; } + ''', + define='HAVE_LINUX_IPV6_V6ONLY_26', + addmain=False, + msg='Checking for IPV6_V6ONLY in linux/in6.h', + local_include=False) + + conf.CHECK_CODE(''' + struct sockaddr_storage sa_store; + struct addrinfo *ai = NULL; + struct in6_addr in6addr; + int idx = if_nametoindex("iface1"); + int s = socket(AF_INET6, SOCK_STREAM, 0); + int ret = getaddrinfo(NULL, NULL, NULL, &ai); + if (ret != 0) { + const char *es = gai_strerror(ret); + } + freeaddrinfo(ai); + { + int val = 1; + #ifdef HAVE_LINUX_IPV6_V6ONLY_26 + #define IPV6_V6ONLY 26 + #endif + ret = setsockopt(s, IPPROTO_IPV6, IPV6_V6ONLY, + (const void *)&val, sizeof(val)); + } + ''', + define='HAVE_IPV6', + lib='nsl socket', + headers='sys/socket.h netdb.h netinet/in.h net/if.h') + + if conf.CONFIG_SET('HAVE_SYS_UCONTEXT_H') and conf.CONFIG_SET('HAVE_SIGNAL_H'): + conf.CHECK_CODE(''' + ucontext_t uc; + sigaddset(&uc.uc_sigmask, SIGUSR1); + ''', + 'HAVE_UCONTEXT_T', + msg="Checking whether we have ucontext_t", + headers='signal.h sys/ucontext.h') + + # Check for atomic builtins. 
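+ # The __sync_* probes below cover the legacy GCC atomic builtins; the __atomic_* and C11 stdatomic checks that follow select whichever sequentially consistent primitive the toolchain provides.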
*/ + conf.CHECK_CODE(''' + int i; + (void)__sync_fetch_and_add(&i, 1); + ''', + 'HAVE___SYNC_FETCH_AND_ADD', + msg='Checking for __sync_fetch_and_add compiler builtin') + + conf.CHECK_CODE(''' + int i; + (void)__sync_add_and_fetch(&i, 1); + ''', + 'HAVE___SYNC_ADD_AND_FETCH', + msg='Checking for __sync_add_and_fetch compiler builtin') + + conf.CHECK_CODE(''' + int32_t i; + atomic_add_32(&i, 1); + ''', + 'HAVE_ATOMIC_ADD_32', + headers='stdint.h sys/atomic.h', + msg='Checking for atomic_add_32 compiler builtin') + + conf.CHECK_CODE(''' + uint32_t i,j; + j = __atomic_add_fetch(&i,1,__ATOMIC_SEQ_CST) + ''', + 'HAVE___ATOMIC_ADD_FETCH', + headers='stdint.h', + msg='Checking for __atomic_add_fetch compiler builtin') + + conf.CHECK_CODE(''' + uint32_t i,j; + __atomic_load(&i,&j,__ATOMIC_SEQ_CST) + ''', + 'HAVE___ATOMIC_ADD_LOAD', + headers='stdint.h', + msg='Checking for __atomic_load compiler builtin') + + # Check for thread fence. */ + tf = conf.CHECK_CODE('atomic_thread_fence(memory_order_seq_cst);', + 'HAVE_ATOMIC_THREAD_FENCE', + headers='stdatomic.h', + msg='Checking for atomic_thread_fence(memory_order_seq_cst) in stdatomic.h') + if not tf: + tf = conf.CHECK_CODE('__atomic_thread_fence(__ATOMIC_SEQ_CST);', + 'HAVE___ATOMIC_THREAD_FENCE', + msg='Checking for __atomic_thread_fence(__ATOMIC_SEQ_CST)') + if not tf: + # __sync_synchronize() is available since 2005 in gcc. + tf = conf.CHECK_CODE('__sync_synchronize();', + 'HAVE___SYNC_SYNCHRONIZE', + msg='Checking for __sync_synchronize') + if tf: + conf.DEFINE('HAVE_ATOMIC_THREAD_FENCE_SUPPORT', 1) + + conf.CHECK_CODE(''' + #define FALL_THROUGH __attribute__((fallthrough)) + + enum direction_e { + UP = 0, + DOWN, + }; + + int main(void) { + enum direction_e key = UP; + int i = 10; + int j = 0; + + switch (key) { + case UP: + i = 5; + FALL_THROUGH; + case DOWN: + j = i * 2; + break; + default: + break; + } + + if (j < i) { + return 1; + } + + return 0; + } + ''', + 'HAVE_FALLTHROUGH_ATTRIBUTE', + addmain=False, + strict=True, + cflags=['-Werror=missing-declarations'], + msg='Checking for fallthrough attribute') + + # these may be builtins, so we need the link=False strategy + conf.CHECK_FUNCS('strdup memmem printf memset memcpy memmove strcpy strncpy bzero', link=False) + + # See https://bugzilla.samba.org/show_bug.cgi?id=1097 + # + # Ported in from autoconf where it was added with this commit: + # commit 804cfb20a067b4b687089dc72a8271b3abf20f31 + # Author: Simo Sorce <idra@samba.org> + # Date: Wed Aug 25 14:24:16 2004 +0000 + # r2070: Let's try to overload srnlen and strndup for AIX where they are natly broken. 
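+ # The defines below make replace.h route strnlen() and strndup() to the
+ # rep_* fallbacks built from replace.c (both functions are listed in
+ # REPLACEMENT_FUNCTIONS near the bottom of this file).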
+ + host_os = sys.platform + if host_os.rfind('aix') > -1: + conf.DEFINE('BROKEN_STRNLEN', 1) + conf.DEFINE('BROKEN_STRNDUP', 1) + + conf.CHECK_FUNCS('shl_load shl_unload shl_findsym') + conf.CHECK_FUNCS('pipe strftime srandom random srand rand usleep setbuffer') + conf.CHECK_FUNCS('lstat getpgrp utime utimes setuid seteuid setreuid setresuid setgid setegid') + conf.CHECK_FUNCS('setregid setresgid chroot strerror vsyslog setlinebuf mktime') + conf.CHECK_FUNCS('ftruncate chsize rename waitpid wait4') + conf.CHECK_FUNCS('initgroups pread pwrite strndup strcasestr strsep') + conf.CHECK_FUNCS('strtok_r mkdtemp dup2 dprintf vdprintf isatty chown lchown') + conf.CHECK_FUNCS('link readlink symlink realpath snprintf vsnprintf') + conf.CHECK_FUNCS('asprintf vasprintf setenv unsetenv strnlen strtoull __strtoull') + conf.CHECK_FUNCS('strtouq strtoll __strtoll strtoq memalign posix_memalign') + conf.CHECK_FUNCS('fmemopen') + + if conf.CONFIG_SET('HAVE_MEMALIGN'): + conf.CHECK_DECLS('memalign', headers='malloc.h') + + # glibc up to 2.3.6 had dangerously broken posix_fallocate(). DON'T USE IT. + if conf.CHECK_CODE(''' +#define _XOPEN_SOURCE 600 +#include <stdlib.h> +#if defined(__GLIBC__) && ((__GLIBC__ < 2) || (__GLIBC__ == 2 && __GLIBC_MINOR__ < 4)) +#error probably broken posix_fallocate +#endif +''', + '_POSIX_FALLOCATE_CAPABLE_LIBC', + msg='Checking for posix_fallocate-capable libc'): + conf.CHECK_FUNCS('posix_fallocate') + + conf.CHECK_FUNCS('prctl dirname basename') + + strlcpy_in_bsd = False + + # libbsd on some platforms provides strlcpy and strlcat + if not conf.CHECK_FUNCS('strlcpy strlcat'): + if conf.CHECK_FUNCS_IN('strlcpy strlcat', 'bsd', headers='bsd/string.h', + checklibc=True): + strlcpy_in_bsd = True + elif conf.env.enable_fuzzing: + # Just to complicate it more, some versions of Honggfuzz have + # got strlcpy and strlcat in libc, but not in <string.h> + # (unless it is there coincidentally, on a BSD). Therefore we + # can't use CHECK_FUNCS alone to decide whether to add the + # headers to replace.h. + # + # As this is only known to happen on a fuzzing compiler, we'll + # skip the check when not in fuzzing mode. + conf.CHECK_HEADERS('bsd/string.h') + + if not conf.CHECK_FUNCS('getpeereid'): + conf.CHECK_FUNCS_IN('getpeereid', 'bsd', headers='sys/types.h bsd/unistd.h') + if not conf.CHECK_FUNCS_IN('setproctitle', 'setproctitle', headers='setproctitle.h'): + conf.CHECK_FUNCS_IN('setproctitle', 'bsd', headers='sys/types.h bsd/unistd.h') + if not conf.CHECK_FUNCS('setproctitle_init'): + conf.CHECK_FUNCS_IN('setproctitle_init', 'bsd', headers='sys/types.h bsd/unistd.h') + + if not conf.CHECK_FUNCS('closefrom'): + conf.CHECK_FUNCS_IN('closefrom', 'bsd', headers='bsd/unistd.h') + + conf.CHECK_CODE(''' + struct ucred cred; + socklen_t cred_len; + int ret = getsockopt(0, SOL_SOCKET, SO_PEERCRED, &cred, &cred_len);''', + 'HAVE_PEERCRED', + msg="Checking whether we can use SO_PEERCRED to get socket credentials", + headers='sys/types.h sys/socket.h') + + #Some OS (ie. 
freebsd) return EINVAL if the conversion could not be done, it's not what we expect + #Let's detect those cases + if conf.CONFIG_SET('HAVE_STRTOLL'): + conf.CHECK_CODE(''' + long long nb = strtoll("Text", NULL, 0); + if (errno == EINVAL) { + return 0; + } else { + return 1; + } + ''', + msg="Checking correct behavior of strtoll", + headers = 'errno.h', + execute = True, + define = 'HAVE_BSD_STRTOLL', + ) + conf.CHECK_FUNCS('if_nameindex if_nametoindex strerror_r') + conf.CHECK_FUNCS('syslog') + conf.CHECK_FUNCS('gai_strerror get_current_dir_name') + conf.CHECK_FUNCS('timegm getifaddrs freeifaddrs mmap setgroups syscall setsid') + conf.CHECK_FUNCS('getgrent_r getgrgid_r getgrnam_r getgrouplist getpagesize') + conf.CHECK_FUNCS('getpwent_r getpwnam_r getpwuid_r epoll_create1') + conf.CHECK_FUNCS('getprogname') + if not conf.CHECK_FUNCS('copy_file_range'): + conf.CHECK_CODE(''' +syscall(SYS_copy_file_range,0,NULL,0,NULL,0,0); + ''', + 'HAVE_SYSCALL_COPY_FILE_RANGE', + headers='sys/syscall.h unistd.h', + msg='Checking whether we have copy_file_range system call') + + conf.SET_TARGET_TYPE('attr', 'EMPTY') + + xattr_headers='sys/attributes.h attr/xattr.h sys/xattr.h' + + # default to 1, we set it to 0 if we don't find any EA implementation below: + conf.DEFINE('HAVE_XATTR_SUPPORT', 1) + if conf.CHECK_FUNCS_IN('getxattr', 'attr', checklibc=True, headers=xattr_headers): + conf.DEFINE('HAVE_XATTR_XATTR', 1) + # Darwin has extra options to xattr-family functions + conf.CHECK_CODE('getxattr(NULL, NULL, NULL, 0, 0, 0)', + headers=xattr_headers, local_include=False, + define='XATTR_ADDITIONAL_OPTIONS', + msg="Checking whether xattr interface takes additional options") + elif conf.CHECK_FUNCS_IN('attr_listf', 'attr', checklibc=True, headers=xattr_headers): + conf.DEFINE('HAVE_XATTR_ATTR', 1) + elif conf.CHECK_FUNCS('extattr_list_fd'): + conf.DEFINE('HAVE_XATTR_EXTATTR', 1) + elif conf.CHECK_FUNCS('flistea'): + conf.DEFINE('HAVE_XATTR_EA', 1) + elif not conf.CHECK_FUNCS('attropen'): + conf.DEFINE('HAVE_XATTR_SUPPORT', 0) + + + conf.CHECK_FUNCS_IN('dlopen dlsym dlerror dlclose', 'dl', + checklibc=True, headers='dlfcn.h dl.h') + + conf.CHECK_C_PROTOTYPE('dlopen', 'void *dlopen(const char* filename, unsigned int flags)', + define='DLOPEN_TAKES_UNSIGNED_FLAGS', headers='dlfcn.h dl.h') + + # + # Check for clock_gettime and fdatasync + # + # First check libc to avoid linking libreplace against librt. 
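+ # (clock_gettime() lived in librt before it moved into libc with
+ # glibc 2.17, which is why the plain libc check is tried first.)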
+ # + if conf.CHECK_FUNCS('fdatasync'): + # some systems are missing the declaration + conf.CHECK_DECLS('fdatasync') + else: + if conf.CHECK_FUNCS_IN('fdatasync', 'rt'): + # some systems are missing the declaration + conf.CHECK_DECLS('fdatasync') + + has_clock_gettime = False + if conf.CHECK_FUNCS('clock_gettime'): + has_clock_gettime = True + + if not has_clock_gettime: + if conf.CHECK_FUNCS_IN('clock_gettime', 'rt', checklibc=True): + has_clock_gettime = True + + if has_clock_gettime: + for c in ['CLOCK_MONOTONIC', 'CLOCK_PROCESS_CPUTIME_ID', 'CLOCK_REALTIME']: + conf.CHECK_CODE(''' + #if TIME_WITH_SYS_TIME + # include <sys/time.h> + # include <time.h> + #else + # if HAVE_SYS_TIME_H + # include <sys/time.h> + # else + # include <time.h> + # endif + #endif + clockid_t clk = %s''' % c, + 'HAVE_%s' % c, + msg='Checking whether the clock_gettime clock ID %s is available' % c) + + conf.CHECK_TYPE('struct timespec', headers='sys/time.h time.h') + + # these headers need to be tested as a group on freebsd + conf.CHECK_HEADERS(headers='sys/socket.h net/if.h', together=True) + conf.CHECK_HEADERS(headers='netinet/in.h arpa/nameser.h resolv.h', together=True) + conf.CHECK_FUNCS_IN('res_search', 'resolv', checklibc=True, + headers='netinet/in.h arpa/nameser.h resolv.h') + + + # try to find libintl (if --without-gettext is not given) + conf.env.intl_libs='' + if not Options.options.disable_gettext: + conf.CHECK_HEADERS('libintl.h') + conf.CHECK_LIB('intl') + conf.CHECK_DECLS('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', headers="libintl.h") + # *textdomain functions are not strictly necessary + conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', + '', checklibc=True, headers='libintl.h') + # gettext and dgettext must exist + # on some systems (the ones with glibc, those are in libc) + if conf.CHECK_FUNCS_IN('dgettext gettext', '', checklibc=True, headers='libintl.h'): + # save for dependency definitions + conf.env.intl_libs='' + # others (e.g. FreeBSD) have separate libintl + elif conf.CHECK_FUNCS_IN('dgettext gettext', 'intl', checklibc=False, headers='libintl.h'): + # save for dependency definitions + conf.env.intl_libs='intl' + # recheck with libintl + conf.CHECK_FUNCS_IN('bindtextdomain textdomain bind_textdomain_codeset', + 'intl', checklibc=False, headers='libintl.h') + else: + # Some hosts need lib iconv for linking with lib intl + # So we try with flags just in case it helps. + oldflags = list(conf.env['EXTRA_LDFLAGS']); + conf.env['EXTRA_LDFLAGS'].extend(["-liconv"]) + conf.CHECK_FUNCS_IN('dgettext gettext bindtextdomain textdomain bind_textdomain_codeset', + 'intl', checklibc=False, headers='libintl.h') + conf.env['EXTRA_LDFLAGS'] = oldflags + if conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DGETTEXT']: + # save for dependency definitions + conf.env.intl_libs='iconv intl' + + # did we find both prototypes and a library to link against? + # if not, unset the detected values (see Bug #9911) + if not (conf.env['HAVE_GETTEXT'] and conf.env['HAVE_DECL_GETTEXT']): + conf.undefine('HAVE_GETTEXT') + conf.undefine('HAVE_DECL_GETTEXT') + if not (conf.env['HAVE_DGETTEXT'] and conf.env['HAVE_DECL_DGETTEXT']): + conf.undefine('HAVE_DGETTEXT') + conf.undefine('HAVE_DECL_DGETTEXT') + + conf.CHECK_FUNCS_IN('pthread_create', 'pthread', checklibc=True, headers='pthread.h') + + PTHREAD_CFLAGS='error' + PTHREAD_LDFLAGS='error' + + if PTHREAD_LDFLAGS == 'error': + # Check if pthread_attr_init() is provided by libc first! 
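+ # The cascade below mirrors common platform conventions: -lpthread on
+ # most systems, -lpthreads on AIX, and -pthread with libc_r on older
+ # FreeBSD.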
+ if conf.CHECK_FUNCS('pthread_attr_init'): + PTHREAD_CFLAGS='-D_REENTRANT' + PTHREAD_LDFLAGS='' + if PTHREAD_LDFLAGS == 'error': + if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthread'): + PTHREAD_CFLAGS='-D_REENTRANT -D_POSIX_PTHREAD_SEMANTICS' + PTHREAD_LDFLAGS='-lpthread' + if PTHREAD_LDFLAGS == 'error': + if conf.CHECK_FUNCS_IN('pthread_attr_init', 'pthreads'): + PTHREAD_CFLAGS='-D_THREAD_SAFE' + PTHREAD_LDFLAGS='-lpthreads' + if PTHREAD_LDFLAGS == 'error': + if conf.CHECK_FUNCS_IN('pthread_attr_init', 'c_r'): + PTHREAD_CFLAGS='-D_THREAD_SAFE -pthread' + PTHREAD_LDFLAGS='-pthread' + + # especially for HP-UX, where the CHECK_FUNC macro fails to test for + # pthread_attr_init. On pthread_mutex_lock it works there... + if PTHREAD_LDFLAGS == 'error': + if conf.CHECK_FUNCS_IN('pthread_mutex_lock', 'pthread'): + PTHREAD_CFLAGS='-D_REENTRANT' + PTHREAD_LDFLAGS='-lpthread' + + if PTHREAD_CFLAGS != 'error' and PTHREAD_LDFLAGS != 'error': + if conf.CONFIG_SET('replace_add_global_pthread'): + conf.ADD_CFLAGS(PTHREAD_CFLAGS) + conf.ADD_LDFLAGS(PTHREAD_LDFLAGS) + conf.CHECK_HEADERS('pthread.h') + conf.DEFINE('HAVE_PTHREAD', '1') + + if conf.CONFIG_SET('HAVE_PTHREAD'): + + conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust', 'pthread', + checklibc=True, headers='pthread.h') + if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST'): + conf.CHECK_FUNCS_IN('pthread_mutexattr_setrobust_np', 'pthread', + checklibc=True, headers='pthread.h') + + conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST', headers='pthread.h') + if not conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST'): + conf.CHECK_DECLS('PTHREAD_MUTEX_ROBUST_NP', headers='pthread.h') + + conf.CHECK_FUNCS_IN('pthread_mutex_consistent', 'pthread', + checklibc=True, headers='pthread.h') + if not conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT'): + conf.CHECK_FUNCS_IN('pthread_mutex_consistent_np', 'pthread', + checklibc=True, headers='pthread.h') + + if ((conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST') or + conf.CONFIG_SET('HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP')) and + (conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST') or + conf.CONFIG_SET('HAVE_DECL_PTHREAD_MUTEX_ROBUST_NP')) and + (conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT') or + conf.CONFIG_SET('HAVE_PTHREAD_MUTEX_CONSISTENT_NP'))): + conf.DEFINE('HAVE_ROBUST_MUTEXES', 1) + + # __thread is available in Solaris Studio, IBM XL, + # gcc, Clang and Intel C Compiler + conf.CHECK_CODE(''' + __thread int tls; + + int main(void) { + return 0; + } + ''', + 'HAVE___THREAD', + addmain=False, + msg='Checking for __thread local storage') + + if conf.CONFIG_SET('HAVE_PTHREAD') and not conf.CONFIG_SET('HAVE___THREAD'): + conf.fatal('Missing required TLS support in pthread library') + + conf.CHECK_FUNCS_IN('crypt', 'crypt', checklibc=True) + conf.CHECK_FUNCS_IN('crypt_r', 'crypt', checklibc=True) + conf.CHECK_FUNCS_IN('crypt_rn', 'crypt', checklibc=True) + + conf.CHECK_VARIABLE('rl_event_hook', define='HAVE_DECL_RL_EVENT_HOOK', always=True, + headers='readline.h readline/readline.h readline/history.h') + conf.CHECK_VARIABLE('program_invocation_short_name', headers='errno.h') + + conf.CHECK_DECLS('snprintf vsnprintf asprintf vasprintf') + + conf.CHECK_DECLS('errno', headers='errno.h', reverse=True) + conf.CHECK_DECLS('EWOULDBLOCK', headers='errno.h') + conf.CHECK_DECLS('environ', reverse=True, headers='unistd.h') + conf.CHECK_DECLS('getgrent_r getpwent_r', reverse=True, headers='pwd.h grp.h') + conf.CHECK_DECLS('pread pwrite setenv setresgid setresuid', reverse=True) + + if conf.CONFIG_SET('HAVE_EPOLL_CREATE1') and 
conf.CONFIG_SET('HAVE_SYS_EPOLL_H'): + conf.DEFINE('HAVE_EPOLL', 1) + + if conf.CHECK_FUNCS('eventfd', headers='sys/eventfd.h'): + conf.DEFINE('HAVE_EVENTFD', 1) + + conf.CHECK_HEADERS('poll.h') + conf.CHECK_FUNCS('poll') + + conf.CHECK_FUNCS('strptime') + conf.CHECK_DECLS('strptime', headers='time.h') + conf.CHECK_CODE('''#define LIBREPLACE_CONFIGURE_TEST_STRPTIME + #include "tests/strptime.c"''', + define='HAVE_WORKING_STRPTIME', + execute=True, + addmain=False, + msg='Checking for working strptime') + + conf.CHECK_C_PROTOTYPE('gettimeofday', + 'int gettimeofday(struct timeval *tv, struct timezone *tz)', + define='HAVE_GETTIMEOFDAY_TZ', headers='sys/time.h') + + conf.CHECK_C_PROTOTYPE('gettimeofday', + 'int gettimeofday(struct timeval *tv, void *tz)', + define='HAVE_GETTIMEOFDAY_TZ_VOID', + headers='sys/time.h') + + conf.CHECK_CODE('#include "tests/snprintf.c"', + define="HAVE_C99_VSNPRINTF", + execute=True, + addmain=False, + msg="Checking for C99 vsnprintf") + + conf.CHECK_CODE('#include "tests/shared_mmap.c"', + addmain=False, add_headers=False, execute=True, + define='HAVE_SHARED_MMAP', + msg="Checking for HAVE_SHARED_MMAP") + + conf.CHECK_CODE('#include "tests/shared_mremap.c"', + addmain=False, add_headers=False, execute=True, + define='HAVE_MREMAP', + msg="Checking for HAVE_MREMAP") + + # OpenBSD (and I've heard HPUX) doesn't sync between mmap and write. + # FIXME: Anything other than a 0 or 1 exit code should abort configure! + conf.CHECK_CODE('#include "tests/incoherent_mmap.c"', + addmain=False, add_headers=False, execute=True, + define='HAVE_INCOHERENT_MMAP', + msg="Checking for HAVE_INCOHERENT_MMAP") + + conf.SAMBA_BUILD_ENV() + + conf.CHECK_CODE(''' + typedef struct {unsigned x;} FOOBAR; + #define X_FOOBAR(x) ((FOOBAR) { x }) + #define FOO_ONE X_FOOBAR(1) + FOOBAR f = FOO_ONE; + static const struct { + FOOBAR y; + } f2[] = { + {FOO_ONE} + }; + static const FOOBAR f3[] = {FOO_ONE}; + ''', + define='HAVE_IMMEDIATE_STRUCTURES') + + conf.CHECK_CODE('mkdir("foo",0777)', define='HAVE_MKDIR_MODE', headers='sys/stat.h') + + # we need the st_rdev test under two names + conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', + define='HAVE_STRUCT_STAT_ST_RDEV', + headers='sys/stat.h') + conf.CHECK_STRUCTURE_MEMBER('struct stat', 'st_rdev', define='HAVE_ST_RDEV', + headers='sys/stat.h') + conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', 'ss_family', + headers='sys/socket.h netinet/in.h') + conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_storage', '__ss_family', + headers='sys/socket.h netinet/in.h') + + + if conf.CHECK_STRUCTURE_MEMBER('struct sockaddr', 'sa_len', + headers='sys/socket.h netinet/in.h', + define='HAVE_SOCKADDR_SA_LEN'): + # the old build system produced both defines + conf.DEFINE('HAVE_STRUCT_SOCKADDR_SA_LEN', 1) + + conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in', 'sin_len', + headers='sys/socket.h netinet/in.h', + define='HAVE_SOCK_SIN_LEN') + + conf.CHECK_STRUCTURE_MEMBER('struct sockaddr_in6', 'sin6_len', + headers='sys/socket.h netinet/in.h', + define='HAVE_SOCK_SIN6_LEN') + + conf.CHECK_CODE('struct sockaddr_un sunaddr; sunaddr.sun_family = AF_UNIX;', + define='HAVE_UNIXSOCKET', headers='sys/socket.h sys/un.h') + + + conf.CHECK_CODE(''' + struct stat st; + char tpl[20]="/tmp/test.XXXXXX"; + char tpl2[20]="/tmp/test.XXXXXX"; + int fd = mkstemp(tpl); + int fd2 = mkstemp(tpl2); + if (fd == -1) { + if (fd2 != -1) { + unlink(tpl2); + } + exit(1); + } + if (fd2 == -1) exit(1); + unlink(tpl); + unlink(tpl2); + if (fstat(fd, &st) != 0) exit(1); + if ((st.st_mode & 0777) 
!= 0600) exit(1); + if (strcmp(tpl, "/tmp/test.XXXXXX") == 0) { + exit(1); + } + if (strcmp(tpl, tpl2) == 0) { + exit(1); + } + exit(0); + ''', + define='HAVE_SECURE_MKSTEMP', + execute=True, + mandatory=True) # lets see if we get a mandatory failure for this one + + # look for a method of finding the list of network interfaces + for method in ['HAVE_IFACE_GETIFADDRS', 'HAVE_IFACE_AIX', 'HAVE_IFACE_IFCONF', 'HAVE_IFACE_IFREQ']: + bsd_for_strlcpy = '' + if strlcpy_in_bsd: + bsd_for_strlcpy = ' bsd' + if conf.CHECK_CODE(''' + #define %s 1 + #define NO_CONFIG_H 1 + #define AUTOCONF_TEST 1 + #include "replace.c" + #include "inet_ntop.c" + #include "snprintf.c" + #include "getifaddrs.c" + #define getifaddrs_test main + #include "tests/getifaddrs.c" + ''' % method, + method, + lib='nsl socket' + bsd_for_strlcpy, + addmain=False, + execute=True): + break + + conf.RECURSE('system') + conf.SAMBA_CONFIG_H() + if conf.CHECK_FUNCS('strerror_r'): + # Check if strerror_r is XSI-Compatable, the default GNU implementation + # is not + conf.CHECK_CODE('int strerror_r(int errnum, char *buf, size_t buflen);', + 'STRERROR_R_XSI_NOT_GNU', + headers='string.h', addmain=False, link=False, + msg="Checking for XSI (rather than GNU) prototype for strerror_r") + + +REPLACEMENT_FUNCTIONS = { + 'replace.c': ['ftruncate', 'strlcpy', 'strlcat', 'mktime', 'initgroups', + 'memmove', 'strdup', 'setlinebuf', 'vsyslog', 'strnlen', + 'strndup', 'waitpid', 'seteuid', 'setegid', 'chroot', + 'mkstemp', 'mkdtemp', 'pread', 'pwrite', 'strcasestr', + 'strsep', 'strtok_r', 'strtoll', 'strtoull', 'setenv', 'unsetenv', + 'utime', 'utimes', 'dup2', 'chown', 'link', 'readlink', + 'symlink', 'lchown', 'realpath', 'memmem', 'vdprintf', + 'dprintf', 'get_current_dir_name', 'copy_file_range', + 'strerror_r', 'clock_gettime', 'memset_s'], + 'timegm.c': ['timegm'], + # Note: C99_VSNPRINTF is not a function, but a special condition + # for replacement + 'snprintf.c': ['C99_VSNPRINTF', 'snprintf', 'vsnprintf', 'asprintf', 'vasprintf'], + # Note: WORKING_STRPTIME is not a function, but a special condition + # for replacement + 'strptime.c': ['WORKING_STRPTIME', 'strptime'], + } + + +def build(bld): + bld.RECURSE('buildtools/wafsamba') + + REPLACE_HOSTCC_SOURCE = '' + + for filename in REPLACEMENT_FUNCTIONS.keys(): + for function in REPLACEMENT_FUNCTIONS[filename]: + if not bld.CONFIG_SET('HAVE_%s' % function.upper()): + REPLACE_HOSTCC_SOURCE += ' %s' % filename + break + + extra_libs = '' + if bld.CONFIG_SET('HAVE_LIBBSD'): extra_libs += ' bsd' + if bld.CONFIG_SET('HAVE_LIBRT'): extra_libs += ' rt' + if bld.CONFIG_SET('REPLACE_REQUIRES_LIBSOCKET_LIBNSL'): extra_libs += ' socket nsl' + + if not bld.CONFIG_SET('HAVE_CLOSEFROM'): + REPLACE_HOSTCC_SOURCE += ' closefrom.c' + + bld.SAMBA_SUBSYSTEM('LIBREPLACE_HOSTCC', + REPLACE_HOSTCC_SOURCE, + use_hostcc=True, + use_global_deps=False, + group='hostcc_base_build_main', + deps = extra_libs + ) + + REPLACE_SOURCE = REPLACE_HOSTCC_SOURCE + + if not bld.CONFIG_SET('HAVE_DLOPEN'): REPLACE_SOURCE += ' dlfcn.c' + if not bld.CONFIG_SET('HAVE_POLL'): REPLACE_SOURCE += ' poll.c' + + if not bld.CONFIG_SET('HAVE_SOCKETPAIR'): REPLACE_SOURCE += ' socketpair.c' + if not bld.CONFIG_SET('HAVE_CONNECT'): REPLACE_SOURCE += ' socket.c' + if not bld.CONFIG_SET('HAVE_GETIFADDRS'): REPLACE_SOURCE += ' getifaddrs.c' + if not bld.CONFIG_SET('HAVE_GETADDRINFO'): REPLACE_SOURCE += ' getaddrinfo.c' + if not bld.CONFIG_SET('HAVE_INET_NTOA'): REPLACE_SOURCE += ' inet_ntoa.c' + if not bld.CONFIG_SET('HAVE_INET_ATON'): 
REPLACE_SOURCE += ' inet_aton.c' + if not bld.CONFIG_SET('HAVE_INET_NTOP'): REPLACE_SOURCE += ' inet_ntop.c' + if not bld.CONFIG_SET('HAVE_INET_PTON'): REPLACE_SOURCE += ' inet_pton.c' + if not bld.CONFIG_SET('HAVE_GETXATTR') or bld.CONFIG_SET('XATTR_ADDITIONAL_OPTIONS'): + REPLACE_SOURCE += ' xattr.c' + + if not bld.CONFIG_SET('HAVE_CLOSEFROM'): + REPLACE_SOURCE += ' closefrom.c' + + bld.SAMBA_LIBRARY('replace', + source=REPLACE_SOURCE, + group='base_libraries', + # FIXME: Ideally symbols should be hidden here so they + # don't appear in the global namespace when Samba + # libraries are loaded, but this doesn't appear to work + # at the moment: + # hide_symbols=bld.BUILTIN_LIBRARY('replace'), + private_library=True, + provide_builtin_linking=True, + deps='dl attr' + extra_libs) + + replace_test_cflags = '' + if bld.CONFIG_SET('HAVE_WNO_FORMAT_TRUNCATION'): + replace_test_cflags += " -Wno-format-truncation" + bld.SAMBA_SUBSYSTEM('replace-test', + source='''tests/testsuite.c tests/strptime.c + tests/os2_delete.c tests/getifaddrs.c''', + deps='replace', + cflags=replace_test_cflags) + + bld.SAMBA_BINARY('replace_testsuite', + source='tests/main.c', + deps='replace replace-test', + install=False) + + # build replacements for stdint.h and stdbool.h if needed + bld.SAMBA_GENERATOR('replace_stdint_h', + rule='cp ${SRC} ${TGT}', + source='hdr_replace.h', + target='stdint.h', + enabled = not bld.CONFIG_SET('HAVE_STDINT_H')) + bld.SAMBA_GENERATOR('replace_stdbool_h', + rule='cp ${SRC} ${TGT}', + source='hdr_replace.h', + target='stdbool.h', + enabled = not bld.CONFIG_SET('HAVE_STDBOOL_H')) + + bld.SAMBA_SUBSYSTEM('samba_intl', source='', use_global_deps=False,deps=bld.env.intl_libs) + +def testonly(ctx): + '''run talloc testsuite''' + import samba_utils + + samba_utils.ADD_LD_LIBRARY_PATH('bin/shared') + samba_utils.ADD_LD_LIBRARY_PATH('bin/shared/private') + + cmd = os.path.join(Context.g_module.out, 'replace_testsuite') + ret = samba_utils.RUN_COMMAND(cmd) + print("testsuite returned %d" % ret) + sys.exit(ret) + +# WAF doesn't build the unit tests for this, maybe because they don't link with talloc? +# This forces it +def test(ctx): + Options.commands.append('build') + Options.commands.append('testonly') + +def dist(): + '''makes a tarball for distribution''' + samba_dist.dist() diff --git a/lib/replace/xattr.c b/lib/replace/xattr.c new file mode 100644 index 0000000..c9b7126 --- /dev/null +++ b/lib/replace/xattr.c @@ -0,0 +1,852 @@ +/* + Unix SMB/CIFS implementation. + replacement routines for xattr implementations + Copyright (C) Jeremy Allison 1998-2005 + Copyright (C) Timur Bakeyev 2005 + Copyright (C) Bjoern Jacke 2006-2007 + Copyright (C) Herb Lewis 2003 + Copyright (C) Andrew Bartlett 2012 + + ** NOTE! The following LGPL license applies to the replace + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/ + +#define UID_WRAPPER_NOT_REPLACE +#include "replace.h" +#include "system/filesys.h" +#include "system/dir.h" + +/******** Solaris EA helper function prototypes ********/ +#ifdef HAVE_ATTROPEN +#define SOLARIS_ATTRMODE S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP +static int solaris_write_xattr(int attrfd, const char *value, size_t size); +static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size); +static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size); +static int solaris_unlinkat(int attrdirfd, const char *name); +static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode); +static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode); +#endif + +/************************************************************************** + Wrappers for extended attribute calls. Based on the Linux package with + support for IRIX and (Net|Free)BSD also. Expand as other systems have them. +****************************************************************************/ + +ssize_t rep_getxattr (const char *path, const char *name, void *value, size_t size) +{ +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + return getxattr(path, name, value, size); +#else + +/* So that we do not recursively call this function */ +#undef getxattr + int options = 0; + return getxattr(path, name, value, size, 0, options); +#endif +#elif defined(HAVE_XATTR_EA) + return getea(path, name, value, size); +#elif defined(HAVE_XATTR_EXTATTR) + ssize_t retval; + int attrnamespace; + const char *attrname; + + if (strncmp(name, "system.", 7) == 0) { + attrnamespace = EXTATTR_NAMESPACE_SYSTEM; + attrname = name + 7; + } else if (strncmp(name, "user.", 5) == 0) { + attrnamespace = EXTATTR_NAMESPACE_USER; + attrname = name + 5; + } else { + errno = EINVAL; + return -1; + } + + /* + * The BSD implementation has a nasty habit of silently truncating + * the returned value to the size of the buffer, so we have to check + * that the buffer is large enough to fit the returned value. + */ + if((retval=extattr_get_file(path, attrnamespace, attrname, NULL, 0)) >= 0) { + if (size == 0) { + return retval; + } else if (retval > size) { + errno = ERANGE; + return -1; + } + if((retval=extattr_get_file(path, attrnamespace, attrname, value, size)) >= 0) + return retval; + } + + return -1; +#elif defined(HAVE_XATTR_ATTR) + int retval, flags = 0; + int valuelength = (int)size; + char *attrname = strchr(name,'.') + 1; + + if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; + + retval = attr_get(path, attrname, (char *)value, &valuelength, flags); + if (size == 0 && retval == -1 && errno == E2BIG) { + return valuelength; + } + + return retval ? 
retval : valuelength; +#elif defined(HAVE_ATTROPEN) + ssize_t ret = -1; + int attrfd = solaris_attropen(path, name, O_RDONLY, 0); + if (attrfd >= 0) { + ret = solaris_read_xattr(attrfd, value, size); + close(attrfd); + } + return ret; +#else + errno = ENOSYS; + return -1; +#endif +} + +ssize_t rep_fgetxattr (int filedes, const char *name, void *value, size_t size) +{ +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + return fgetxattr(filedes, name, value, size); +#else + +/* So that we do not recursively call this function */ +#undef fgetxattr + int options = 0; + return fgetxattr(filedes, name, value, size, 0, options); +#endif +#elif defined(HAVE_XATTR_EA) + return fgetea(filedes, name, value, size); +#elif defined(HAVE_XATTR_EXTATTR) + ssize_t retval; + int attrnamespace; + const char *attrname; + + if (strncmp(name, "system.", 7) == 0) { + attrnamespace = EXTATTR_NAMESPACE_SYSTEM; + attrname = name + 7; + } else if (strncmp(name, "user.", 5) == 0) { + attrnamespace = EXTATTR_NAMESPACE_USER; + attrname = name + 5; + } else { + errno = EINVAL; + return -1; + } + + if((retval=extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0)) >= 0) { + if (size == 0) { + return retval; + } else if (retval > size) { + errno = ERANGE; + return -1; + } + if((retval=extattr_get_fd(filedes, attrnamespace, attrname, value, size)) >= 0) + return retval; + } + + return -1; +#elif defined(HAVE_XATTR_ATTR) + int retval, flags = 0; + int valuelength = (int)size; + char *attrname = strchr(name,'.') + 1; + + if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; + + retval = attr_getf(filedes, attrname, (char *)value, &valuelength, flags); + if (size == 0 && retval == -1 && errno == E2BIG) { + return valuelength; + } + return retval ? retval : valuelength; +#elif defined(HAVE_ATTROPEN) + ssize_t ret = -1; + int attrfd = solaris_openat(filedes, name, O_RDONLY|O_XATTR, 0); + if (attrfd >= 0) { + ret = solaris_read_xattr(attrfd, value, size); + close(attrfd); + } + return ret; +#else + errno = ENOSYS; + return -1; +#endif +} + +#if defined(HAVE_XATTR_EXTATTR) + +#define EXTATTR_PREFIX(s) (s), (sizeof((s))-1) + +static struct { + int space; + const char *name; + size_t len; +} +extattr[] = { + { EXTATTR_NAMESPACE_SYSTEM, EXTATTR_PREFIX("system.") }, + { EXTATTR_NAMESPACE_USER, EXTATTR_PREFIX("user.") }, +}; + +typedef union { + const char *path; + int filedes; +} extattr_arg; + +static ssize_t bsd_attr_list (int type, extattr_arg arg, char *list, size_t size) +{ + ssize_t list_size, total_size = 0; + int i, len; + size_t t; + char *buf; + /* Iterate through extattr(2) namespaces */ + for(t = 0; t < ARRAY_SIZE(extattr); t++) { + if (t != EXTATTR_NAMESPACE_USER && geteuid() != 0) { + /* ignore all but user namespace when we are not root, see bug 10247 */ + continue; + } + switch(type) { + case 0: + list_size = extattr_list_file(arg.path, extattr[t].space, list, size); + break; + case 1: + list_size = extattr_list_link(arg.path, extattr[t].space, list, size); + break; + case 2: + list_size = extattr_list_fd(arg.filedes, extattr[t].space, list, size); + break; + default: + errno = ENOSYS; + return -1; + } + /* Some error happened. Errno should be set by the previous call */ + if(list_size < 0) + return -1; + /* No attributes */ + if(list_size == 0) + continue; + /* XXX: Call with an empty buffer may be used to calculate + necessary buffer size. Unfortunately, we can't say, how + many attributes were returned, so here is the potential + problem with the emulation. 
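+ The worst-case estimate below therefore pads the reported size so
+ that a namespace prefix can be prepended to every possible entry.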
+ */ + if(list == NULL) { + /* Take the worse case of one char attribute names - + two bytes per name plus one more for sanity. + */ + total_size += list_size + (list_size/2 + 1)*extattr[t].len; + continue; + } + /* Count necessary offset to fit namespace prefixes */ + len = 0; + for(i = 0; i < list_size; i += list[i] + 1) + len += extattr[t].len; + + total_size += list_size + len; + /* Buffer is too small to fit the results */ + if(total_size > size) { + errno = ERANGE; + return -1; + } + /* Shift results back, so we can prepend prefixes */ + buf = (char *)memmove(list + len, list, list_size); + + for(i = 0; i < list_size; i += len + 1) { + len = buf[i]; + + /* + * If for some reason we receive a truncated + * return from call to list xattrs the pascal + * string lengths will not be changed and + * therefore we must check that we're not + * reading garbage data or off end of array + */ + if (len + i >= list_size) { + errno = ERANGE; + return -1; + } + strncpy(list, extattr[t].name, extattr[t].len + 1); + list += extattr[t].len; + strncpy(list, buf + i + 1, len); + list[len] = '\0'; + list += len + 1; + } + size -= total_size; + } + return total_size; +} + +#endif + +#if defined(HAVE_XATTR_ATTR) && (defined(HAVE_SYS_ATTRIBUTES_H) || defined(HAVE_ATTR_ATTRIBUTES_H)) +static char attr_buffer[ATTR_MAX_VALUELEN]; + +static ssize_t irix_attr_list(const char *path, int filedes, char *list, size_t size, int flags) +{ + int retval = 0, index; + attrlist_cursor_t *cursor = 0; + int total_size = 0; + attrlist_t * al = (attrlist_t *)attr_buffer; + attrlist_ent_t *ae; + size_t ent_size, left = size; + char *bp = list; + + while (true) { + if (filedes) + retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); + else + retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); + if (retval) break; + for (index = 0; index < al->al_count; index++) { + ae = ATTR_ENTRY(attr_buffer, index); + ent_size = strlen(ae->a_name) + sizeof("user."); + if (left >= ent_size) { + strncpy(bp, "user.", sizeof("user.")); + strncat(bp, ae->a_name, ent_size - sizeof("user.")); + bp += ent_size; + left -= ent_size; + } else if (size) { + errno = ERANGE; + retval = -1; + break; + } + total_size += ent_size; + } + if (al->al_more == 0) break; + } + if (retval == 0) { + flags |= ATTR_ROOT; + cursor = 0; + while (true) { + if (filedes) + retval = attr_listf(filedes, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); + else + retval = attr_list(path, attr_buffer, ATTR_MAX_VALUELEN, flags, cursor); + if (retval) break; + for (index = 0; index < al->al_count; index++) { + ae = ATTR_ENTRY(attr_buffer, index); + ent_size = strlen(ae->a_name) + sizeof("system."); + if (left >= ent_size) { + strncpy(bp, "system.", sizeof("system.")); + strncat(bp, ae->a_name, ent_size - sizeof("system.")); + bp += ent_size; + left -= ent_size; + } else if (size) { + errno = ERANGE; + retval = -1; + break; + } + total_size += ent_size; + } + if (al->al_more == 0) break; + } + } + return (ssize_t)(retval ? 
retval : total_size); +} + +#endif + +ssize_t rep_listxattr (const char *path, char *list, size_t size) +{ +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + return listxattr(path, list, size); +#else +/* So that we do not recursively call this function */ +#undef listxattr + int options = 0; + return listxattr(path, list, size, options); +#endif +#elif defined(HAVE_XATTR_EA) + return listea(path, list, size); +#elif defined(HAVE_XATTR_EXTATTR) + extattr_arg arg; + arg.path = path; + return bsd_attr_list(0, arg, list, size); +#elif defined(HAVE_XATTR_ATTR) && defined(HAVE_SYS_ATTRIBUTES_H) + return irix_attr_list(path, 0, list, size, 0); +#elif defined(HAVE_ATTROPEN) + ssize_t ret = -1; + int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0); + if (attrdirfd >= 0) { + ret = solaris_list_xattr(attrdirfd, list, size); + close(attrdirfd); + } + return ret; +#else + errno = ENOSYS; + return -1; +#endif +} + +ssize_t rep_flistxattr (int filedes, char *list, size_t size) +{ +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + return flistxattr(filedes, list, size); +#else +/* So that we do not recursively call this function */ +#undef flistxattr + int options = 0; + return flistxattr(filedes, list, size, options); +#endif +#elif defined(HAVE_XATTR_EA) + return flistea(filedes, list, size); +#elif defined(HAVE_XATTR_EXTATTR) + extattr_arg arg; + arg.filedes = filedes; + return bsd_attr_list(2, arg, list, size); +#elif defined(HAVE_XATTR_ATTR) + return irix_attr_list(NULL, filedes, list, size, 0); +#elif defined(HAVE_ATTROPEN) + ssize_t ret = -1; + int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0); + if (attrdirfd >= 0) { + ret = solaris_list_xattr(attrdirfd, list, size); + close(attrdirfd); + } + return ret; +#else + errno = ENOSYS; + return -1; +#endif +} + +int rep_removexattr (const char *path, const char *name) +{ +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + return removexattr(path, name); +#else +/* So that we do not recursively call this function */ +#undef removexattr + int options = 0; + return removexattr(path, name, options); +#endif +#elif defined(HAVE_XATTR_EA) + return removeea(path, name); +#elif defined(HAVE_XATTR_EXTATTR) + int attrnamespace; + const char *attrname; + + if (strncmp(name, "system.", 7) == 0) { + attrnamespace = EXTATTR_NAMESPACE_SYSTEM; + attrname = name + 7; + } else if (strncmp(name, "user.", 5) == 0) { + attrnamespace = EXTATTR_NAMESPACE_USER; + attrname = name + 5; + } else { + errno = EINVAL; + return -1; + } + + return extattr_delete_file(path, attrnamespace, attrname); +#elif defined(HAVE_XATTR_ATTR) + int flags = 0; + char *attrname = strchr(name,'.') + 1; + + if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; + + return attr_remove(path, attrname, flags); +#elif defined(HAVE_ATTROPEN) + int ret = -1; + int attrdirfd = solaris_attropen(path, ".", O_RDONLY, 0); + if (attrdirfd >= 0) { + ret = solaris_unlinkat(attrdirfd, name); + close(attrdirfd); + } + return ret; +#else + errno = ENOSYS; + return -1; +#endif +} + +int rep_fremovexattr (int filedes, const char *name) +{ +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + return fremovexattr(filedes, name); +#else +/* So that we do not recursively call this function */ +#undef fremovexattr + int options = 0; + return fremovexattr(filedes, name, options); +#endif +#elif defined(HAVE_XATTR_EA) + return fremoveea(filedes, name); +#elif defined(HAVE_XATTR_EXTATTR) + int attrnamespace; + const char *attrname; + + if 
(strncmp(name, "system.", 7) == 0) { + attrnamespace = EXTATTR_NAMESPACE_SYSTEM; + attrname = name + 7; + } else if (strncmp(name, "user.", 5) == 0) { + attrnamespace = EXTATTR_NAMESPACE_USER; + attrname = name + 5; + } else { + errno = EINVAL; + return -1; + } + + return extattr_delete_fd(filedes, attrnamespace, attrname); +#elif defined(HAVE_XATTR_ATTR) + int flags = 0; + char *attrname = strchr(name,'.') + 1; + + if (strncmp(name, "system", 6) == 0) flags |= ATTR_ROOT; + + return attr_removef(filedes, attrname, flags); +#elif defined(HAVE_ATTROPEN) + int ret = -1; + int attrdirfd = solaris_openat(filedes, ".", O_RDONLY|O_XATTR, 0); + if (attrdirfd >= 0) { + ret = solaris_unlinkat(attrdirfd, name); + close(attrdirfd); + } + return ret; +#else + errno = ENOSYS; + return -1; +#endif +} + +int rep_setxattr (const char *path, const char *name, const void *value, size_t size, int flags) +{ + int retval = -1; +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + retval = setxattr(path, name, value, size, flags); + if (retval < 0) { + if (errno == ENOSPC || errno == E2BIG) { + errno = ENAMETOOLONG; + } + } + return retval; +#else +/* So that we do not recursively call this function */ +#undef setxattr + retval = setxattr(path, name, value, size, 0, flags); + if (retval < 0) { + if (errno == E2BIG) { + errno = ENAMETOOLONG; + } + } + return retval; +#endif +#elif defined(HAVE_XATTR_EA) + if (flags) { + retval = getea(path, name, NULL, 0); + if (retval < 0) { + if (flags & XATTR_REPLACE && errno == ENOATTR) { + return -1; + } + } else { + if (flags & XATTR_CREATE) { + errno = EEXIST; + return -1; + } + } + } + retval = setea(path, name, value, size, 0); + return retval; +#elif defined(HAVE_XATTR_EXTATTR) + int attrnamespace; + const char *attrname; + + if (strncmp(name, "system.", 7) == 0) { + attrnamespace = EXTATTR_NAMESPACE_SYSTEM; + attrname = name + 7; + } else if (strncmp(name, "user.", 5) == 0) { + attrnamespace = EXTATTR_NAMESPACE_USER; + attrname = name + 5; + } else { + errno = EINVAL; + return -1; + } + + if (flags) { + /* Check attribute existence */ + retval = extattr_get_file(path, attrnamespace, attrname, NULL, 0); + if (retval < 0) { + /* REPLACE attribute, that doesn't exist */ + if (flags & XATTR_REPLACE && errno == ENOATTR) { + errno = ENOATTR; + return -1; + } + /* Ignore other errors */ + } + else { + /* CREATE attribute, that already exists */ + if (flags & XATTR_CREATE) { + errno = EEXIST; + return -1; + } + } + } + retval = extattr_set_file(path, attrnamespace, attrname, value, size); + return (retval < 0) ? 
-1 : 0; +#elif defined(HAVE_XATTR_ATTR) + int myflags = 0; + char *attrname = strchr(name,'.') + 1; + + if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT; + if (flags & XATTR_CREATE) myflags |= ATTR_CREATE; + if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE; + + retval = attr_set(path, attrname, (const char *)value, size, myflags); + if (retval < 0) { + if (errno == E2BIG) { + errno = ENAMETOOLONG; + } + } + return retval; +#elif defined(HAVE_ATTROPEN) + int myflags = O_RDWR; + int attrfd; + if (flags & XATTR_CREATE) myflags |= O_EXCL; + if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT; + attrfd = solaris_attropen(path, name, myflags, (mode_t) SOLARIS_ATTRMODE); + if (attrfd >= 0) { + retval = solaris_write_xattr(attrfd, value, size); + close(attrfd); + } + return retval; +#else + errno = ENOSYS; + return -1; +#endif +} + +int rep_fsetxattr (int filedes, const char *name, const void *value, size_t size, int flags) +{ + int retval = -1; +#if defined(HAVE_XATTR_XATTR) +#ifndef XATTR_ADDITIONAL_OPTIONS + retval = fsetxattr(filedes, name, value, size, flags); + if (retval < 0) { + if (errno == ENOSPC) { + errno = ENAMETOOLONG; + } + } + return retval; +#else +/* So that we do not recursively call this function */ +#undef fsetxattr + retval = fsetxattr(filedes, name, value, size, 0, flags); + if (retval < 0) { + if (errno == E2BIG) { + errno = ENAMETOOLONG; + } + } + return retval; +#endif +#elif defined(HAVE_XATTR_EA) + if (flags) { + retval = fgetea(filedes, name, NULL, 0); + if (retval < 0) { + if (flags & XATTR_REPLACE && errno == ENOATTR) { + return -1; + } + } else { + if (flags & XATTR_CREATE) { + errno = EEXIST; + return -1; + } + } + } + retval = fsetea(filedes, name, value, size, 0); + return retval; +#elif defined(HAVE_XATTR_EXTATTR) + int attrnamespace; + const char *attrname; + + if (strncmp(name, "system.", 7) == 0) { + attrnamespace = EXTATTR_NAMESPACE_SYSTEM; + attrname = name + 7; + } else if (strncmp(name, "user.", 5) == 0) { + attrnamespace = EXTATTR_NAMESPACE_USER; + attrname = name + 5; + } else { + errno = EINVAL; + return -1; + } + + if (flags) { + /* Check attribute existence */ + retval = extattr_get_fd(filedes, attrnamespace, attrname, NULL, 0); + if (retval < 0) { + /* REPLACE attribute, that doesn't exist */ + if (flags & XATTR_REPLACE && errno == ENOATTR) { + errno = ENOATTR; + return -1; + } + /* Ignore other errors */ + } + else { + /* CREATE attribute, that already exists */ + if (flags & XATTR_CREATE) { + errno = EEXIST; + return -1; + } + } + } + retval = extattr_set_fd(filedes, attrnamespace, attrname, value, size); + return (retval < 0) ? 
-1 : 0; +#elif defined(HAVE_XATTR_ATTR) + int myflags = 0; + char *attrname = strchr(name,'.') + 1; + + if (strncmp(name, "system", 6) == 0) myflags |= ATTR_ROOT; + if (flags & XATTR_CREATE) myflags |= ATTR_CREATE; + if (flags & XATTR_REPLACE) myflags |= ATTR_REPLACE; + + return attr_setf(filedes, attrname, (const char *)value, size, myflags); +#elif defined(HAVE_ATTROPEN) + int myflags = O_RDWR | O_XATTR; + int attrfd; + if (flags & XATTR_CREATE) myflags |= O_EXCL; + if (!(flags & XATTR_REPLACE)) myflags |= O_CREAT; + attrfd = solaris_openat(filedes, name, myflags, (mode_t) SOLARIS_ATTRMODE); + if (attrfd >= 0) { + retval = solaris_write_xattr(attrfd, value, size); + close(attrfd); + } + return retval; +#else + errno = ENOSYS; + return -1; +#endif +} + +/************************************************************************** + helper functions for Solaris' EA support +****************************************************************************/ +#ifdef HAVE_ATTROPEN +static ssize_t solaris_read_xattr(int attrfd, void *value, size_t size) +{ + struct stat sbuf; + + if (fstat(attrfd, &sbuf) == -1) { + errno = ENOATTR; + return -1; + } + + /* This is to return the current size of the named extended attribute */ + if (size == 0) { + return sbuf.st_size; + } + + /* check size and read xattr */ + if (sbuf.st_size > size) { + errno = ERANGE; + return -1; + } + + return read(attrfd, value, sbuf.st_size); +} + +static ssize_t solaris_list_xattr(int attrdirfd, char *list, size_t size) +{ + ssize_t len = 0; + DIR *dirp; + struct dirent *de; + int newfd = dup(attrdirfd); + /* CAUTION: The originating file descriptor should not be + used again following the call to fdopendir(). + For that reason we dup() the file descriptor + here to make things more clear. */ + dirp = fdopendir(newfd); + + while ((de = readdir(dirp))) { + size_t listlen = strlen(de->d_name) + 1; + if (!strcmp(de->d_name, ".") || !strcmp(de->d_name, "..")) { + /* we don't want "." and ".." here: */ + continue; + } + + if (size == 0) { + /* return the current size of the list of extended attribute names*/ + len += listlen; + } else { + /* check size and copy entries + nul into list.
*/ + if ((len + listlen) > size) { + errno = ERANGE; + len = -1; + break; + } else { + strlcpy(list + len, de->d_name, listlen); + len += listlen; + } + } + } + + if (closedir(dirp) == -1) { + return -1; + } + return len; +} + +static int solaris_unlinkat(int attrdirfd, const char *name) +{ + if (unlinkat(attrdirfd, name, 0) == -1) { + if (errno == ENOENT) { + errno = ENOATTR; + } + return -1; + } + return 0; +} + +static int solaris_attropen(const char *path, const char *attrpath, int oflag, mode_t mode) +{ + int filedes = attropen(path, attrpath, oflag, mode); + if (filedes == -1) { + if (errno == EINVAL) { + errno = ENOTSUP; + } else { + errno = ENOATTR; + } + } + return filedes; +} + +static int solaris_openat(int fildes, const char *path, int oflag, mode_t mode) +{ + int filedes = openat(fildes, path, oflag, mode); + if (filedes == -1) { + if (errno == EINVAL) { + errno = ENOTSUP; + } else { + errno = ENOATTR; + } + } + return filedes; +} + +static int solaris_write_xattr(int attrfd, const char *value, size_t size) +{ + if ((ftruncate(attrfd, 0) == 0) && (write(attrfd, value, size) == size)) { + return 0; + } else { + return -1; + } +} +#endif /*HAVE_ATTROPEN*/ + + diff --git a/man/talloc.3.xml b/man/talloc.3.xml new file mode 100644 index 0000000..c51061f --- /dev/null +++ b/man/talloc.3.xml @@ -0,0 +1,814 @@ +<?xml version="1.0"?> +<!DOCTYPE refentry PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN" "http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd"> +<refentry> + <refentryinfo><date>2015-04-10</date></refentryinfo> + <refmeta> + <refentrytitle>talloc</refentrytitle> + <manvolnum>3</manvolnum> + <refmiscinfo class="source">Samba</refmiscinfo> + <refmiscinfo class="manual">System Administration tools</refmiscinfo> + <refmiscinfo class="version">4.0</refmiscinfo> + </refmeta> + <refnamediv> + <refname>talloc</refname> +<refpurpose>hierarchical reference counted memory pool system with destructors</refpurpose> + </refnamediv> + <refsynopsisdiv> +<synopsis>#include <talloc.h></synopsis> + </refsynopsisdiv> + <refsect1><title>DESCRIPTION</title> + <para> + If you are used to talloc from Samba3 then please read this + carefully, as talloc has changed a lot. + </para> + <para> + The new talloc is a hierarchical, reference counted memory pool + system with destructors. Quite a mouthful really, but not too bad + once you get used to it. + </para> + <para> + Perhaps the biggest change from Samba3 is that there is no + distinction between a "talloc context" and a "talloc pointer". Any + pointer returned from talloc() is itself a valid talloc context. + This means you can do this: + </para> + <programlisting> + struct foo *X = talloc(mem_ctx, struct foo); + X->name = talloc_strdup(X, "foo"); + </programlisting> + <para> + and the pointer <literal role="code">X->name</literal> + would be a "child" of the talloc context <literal + role="code">X</literal> which is itself a child of + <literal role="code">mem_ctx</literal>. So if you do + <literal role="code">talloc_free(mem_ctx)</literal> then + it is all destroyed, whereas if you do <literal + role="code">talloc_free(X)</literal> then just <literal + role="code">X</literal> and <literal + role="code">X->name</literal> are destroyed, and if + you do <literal + role="code">talloc_free(X->name)</literal> then just + the name element of <literal role="code">X</literal> is + destroyed. + </para> + <para> + If you think about this, then what this effectively gives you is an + n-ary tree, where you can free any part of the tree with + talloc_free(). 
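+ A minimal sketch of this tree behaviour (the variable names here are
+ purely illustrative):
+ </para>
+ <programlisting>
+ TALLOC_CTX *root = talloc_new(NULL);
+ char *branch = talloc_strdup(root, "branch");
+ char *leaf = talloc_strdup(branch, "leaf");
+
+ talloc_free(branch); /* frees branch and leaf; root stays valid */
+ talloc_free(root); /* frees whatever is left of the tree */
+ </programlisting>
+ <para>
+ Freeing an inner node releases just that subtree, while freeing the
+ root releases everything that remains.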
+ </para> + <para> + If you find this confusing, then I suggest you run the <literal + role="code">testsuite</literal> program to watch talloc + in action. You may also like to add your own tests to <literal + role="code">testsuite.c</literal> to clarify how some + particular situation is handled. + </para> + </refsect1> + <refsect1><title>TALLOC API</title> + <para> + The following is a complete guide to the talloc API. Read it all at + least twice. + </para> + <refsect2><title>(type *)talloc(const void *ctx, type);</title> + <para> + The talloc() macro is the core of the talloc library. It takes a + memory <emphasis role="italic">ctx</emphasis> and a <emphasis + role="italic">type</emphasis>, and returns a pointer to a new + area of memory of the given <emphasis + role="italic">type</emphasis>. + </para> + <para> + The returned pointer is itself a talloc context, so you can use + it as the <emphasis role="italic">ctx</emphasis> argument to more + calls to talloc() if you wish. + </para> + <para> + The returned pointer is a "child" of the supplied context. This + means that if you talloc_free() the <emphasis + role="italic">ctx</emphasis> then the new child disappears as + well. Alternatively you can free just the child. + </para> + <para> + The <emphasis role="italic">ctx</emphasis> argument to talloc() + can be NULL, in which case a new top level context is created. + </para> + </refsect2> + <refsect2><title>void *talloc_size(const void *ctx, size_t size);</title> + <para> + The function talloc_size() should be used when you don't have a + convenient type to pass to talloc(). Unlike talloc(), it is not + type safe (as it returns a void *), so you are on your own for + type checking. + </para> + </refsect2> + <refsect2><title>(typeof(ptr)) talloc_ptrtype(const void *ctx, ptr);</title> + <para> + The talloc_ptrtype() macro should be used when you have a pointer and + want to allocate memory for it to point at. When compiling + with gcc >= 3 it is typesafe. Note that this is a wrapper around talloc_size(), + so talloc_get_name() will return the current location in the source file, + not the type. + </para> + </refsect2> + <refsect2><title>int talloc_free(void *ptr);</title> + <para> + The talloc_free() function frees a piece of talloc memory, and + all its children. You can call talloc_free() on any pointer + returned by talloc(). + </para> + <para> + The return value of talloc_free() indicates success or failure, + with 0 returned for success and -1 for failure. The only + possible failure condition is if <emphasis + role="italic">ptr</emphasis> had a destructor attached to it and + the destructor returned -1. See <link + linkend="talloc_set_destructor"><quote>talloc_set_destructor()</quote></link> + for details on destructors. + </para> + <para> + If this pointer has an additional parent when talloc_free() is + called then the memory is not actually released, but instead the + most recently established parent is destroyed. See <link + linkend="talloc_reference"><quote>talloc_reference()</quote></link> + for details on establishing additional parents. + </para> + <para> + For more control on which parent is removed, see <link + linkend="talloc_unlink"><quote>talloc_unlink()</quote></link>. + </para> + <para> + talloc_free() operates recursively on its children. + </para> + <para> + From the 2.0 version of talloc, as a special case, + talloc_free() is refused on pointers that have more than one + parent, as talloc would have no way of knowing which parent + should be removed.
To free a pointer that has more than one + parent please use talloc_unlink(). + </para> + <para> + To help you find problems in your code caused by this behaviour, if + you do try and free a pointer with more than one parent then the + talloc logging function will be called to give output like this: + </para> + <para> + <screen format="linespecific"> + ERROR: talloc_free with references at some_dir/source/foo.c:123 + reference at some_dir/source/other.c:325 + reference at some_dir/source/third.c:121 + </screen> + </para> + <para> + Please see the documentation for talloc_set_log_fn() and + talloc_set_log_stderr() for more information on talloc logging + functions. + </para> + </refsect2> + <refsect2 id="talloc_reference"><title>void *talloc_reference(const void *ctx, const void *ptr);</title> + <para> + The talloc_reference() function makes <emphasis + role="italic">ctx</emphasis> an additional parent of <emphasis + role="italic">ptr</emphasis>. + </para> + <para> + The return value of talloc_reference() is always the original + pointer <emphasis role="italic">ptr</emphasis>, unless talloc ran + out of memory in creating the reference in which case it will + return NULL (each additional reference consumes around 48 bytes + of memory on intel x86 platforms). + </para> + <para> + If <emphasis role="italic">ptr</emphasis> is NULL, then the + function is a no-op, and simply returns NULL. + </para> + <para> + After creating a reference you can free it in one of the + following ways: + </para> + <para> + <itemizedlist> + <listitem> + <para> + you can talloc_free() any parent of the original pointer. + That will reduce the number of parents of this pointer by 1, + and will cause this pointer to be freed if it runs out of + parents. + </para> + </listitem> + <listitem> + <para> + you can talloc_free() the pointer itself if it has at maximum one + parent. This behaviour has been changed since the release of version + 2.0. Further information in the description of "talloc_free". + </para> + </listitem> + </itemizedlist> + </para> + <para> + For more control on which parent to remove, see <link + linkend="talloc_unlink"><quote>talloc_unlink()</quote></link>. + </para> + </refsect2> + <refsect2 id="talloc_unlink"><title>int talloc_unlink(const void *ctx, void *ptr);</title> + <para> + The talloc_unlink() function removes a specific parent from + <emphasis role="italic">ptr</emphasis>. The <emphasis + role="italic">ctx</emphasis> passed must either be a context used + in talloc_reference() with this pointer, or must be a direct + parent of ptr. + </para> + <para> + Note that if the parent has already been removed using + talloc_free() then this function will fail and will return -1. + Likewise, if <emphasis role="italic">ptr</emphasis> is NULL, then + the function will make no modifications and return -1. + </para> + <para> + Usually you can just use talloc_free() instead of + talloc_unlink(), but sometimes it is useful to have the + additional control on which parent is removed. + </para> + </refsect2> + <refsect2 id="talloc_set_destructor"><title>void talloc_set_destructor(const void *ptr, int (*destructor)(void *));</title> + <para> + The function talloc_set_destructor() sets the <emphasis + role="italic">destructor</emphasis> for the pointer <emphasis + role="italic">ptr</emphasis>. A <emphasis + role="italic">destructor</emphasis> is a function that is called + when the memory used by a pointer is about to be released. 
The + destructor receives <emphasis role="italic">ptr</emphasis> as an + argument, and should return 0 for success and -1 for failure. + </para> + <para> + The <emphasis role="italic">destructor</emphasis> can do anything + it wants to, including freeing other pieces of memory. A common + use for destructors is to clean up operating system resources + (such as open file descriptors) contained in the structure the + destructor is placed on. + </para> + <para> + You can only place one destructor on a pointer. If you need more + than one destructor then you can create a zero-length child of + the pointer and place an additional destructor on that. + </para> + <para> + To remove a destructor call talloc_set_destructor() with NULL for + the destructor. + </para> + <para> + If your destructor attempts to talloc_free() the pointer that it + is the destructor for then talloc_free() will return -1 and the + free will be ignored. This would be a pointless operation + anyway, as the destructor is only called when the memory is just + about to go away. + </para> + </refsect2> + <refsect2><title>int talloc_increase_ref_count(const void *<emphasis role="italic">ptr</emphasis>);</title> + <para> + The talloc_increase_ref_count(<emphasis + role="italic">ptr</emphasis>) function is exactly equivalent to: + </para> + <programlisting>talloc_reference(NULL, ptr);</programlisting> + <para> + You can use either syntax, depending on which you think is + clearer in your code. + </para> + <para> + It returns 0 on success and -1 on failure. + </para> + </refsect2> + <refsect2><title>size_t talloc_reference_count(const void *<emphasis role="italic">ptr</emphasis>);</title> + <para> + Return the number of references to the pointer. + </para> + </refsect2> + <refsect2 id="talloc_set_name"><title>void talloc_set_name(const void *ptr, const char *fmt, ...);</title> + <para> + Each talloc pointer has a "name". The name is used principally + for debugging purposes, although it is also possible to set and + get the name on a pointer as a way of "marking" pointers in + your code. + </para> + <para> + The main use for names on pointers is for "talloc reports". See + <link + linkend="talloc_report"><quote>talloc_report_depth_cb()</quote></link>, + <link + linkend="talloc_report"><quote>talloc_report_depth_file()</quote></link>, + <link + linkend="talloc_report"><quote>talloc_report()</quote></link> + and <link + linkend="talloc_report_full"><quote>talloc_report_full()</quote></link> + for details. Also see <link + linkend="talloc_enable_leak_report"><quote>talloc_enable_leak_report()</quote></link> + and <link + linkend="talloc_enable_leak_report_full"><quote>talloc_enable_leak_report_full()</quote></link>. + </para> + <para> + The talloc_set_name() function allocates memory as a child of the + pointer. It is logically equivalent to: + </para> + <programlisting>talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...));</programlisting> + <para> + Note that multiple calls to talloc_set_name() will allocate more + memory without releasing the name. All of the memory is released + when the ptr is freed using talloc_free(). + </para> + </refsect2> + <refsect2><title>void talloc_set_name_const(const void *<emphasis role="italic">ptr</emphasis>, const char *<emphasis role="italic">name</emphasis>);</title> + <para> + The function talloc_set_name_const() is just like + talloc_set_name(), but it takes a string constant, and is much + faster.
It is extensively used by the "auto naming" macros, such
+	  as talloc_p().
+	</para>
+	<para>
+	  This function does not allocate any memory. It just copies the
+	  supplied pointer into the internal representation of the talloc
+	  ptr. This means you must not pass a <emphasis
+	  role="italic">name</emphasis> pointer to memory that will
+	  disappear before <emphasis role="italic">ptr</emphasis> is freed
+	  with talloc_free().
+	</para>
+      </refsect2>
+      <refsect2><title>void *talloc_named(const void *<emphasis role="italic">ctx</emphasis>, size_t <emphasis role="italic">size</emphasis>, const char *<emphasis role="italic">fmt</emphasis>, ...);</title>
+	<para>
+	  The talloc_named() function creates a named talloc pointer. It
+	  is equivalent to:
+	</para>
+	<programlisting>ptr = talloc_size(ctx, size);
+talloc_set_name(ptr, fmt, ...);</programlisting>
+      </refsect2>
+      <refsect2><title>void *talloc_named_const(const void *<emphasis role="italic">ctx</emphasis>, size_t <emphasis role="italic">size</emphasis>, const char *<emphasis role="italic">name</emphasis>);</title>
+	<para>
+	  This is equivalent to:
+	</para>
+	<programlisting>ptr = talloc_size(ctx, size);
+talloc_set_name_const(ptr, name);</programlisting>
+      </refsect2>
+      <refsect2><title>const char *talloc_get_name(const void *<emphasis role="italic">ptr</emphasis>);</title>
+	<para>
+	  This returns the current name for the given talloc pointer,
+	  <emphasis role="italic">ptr</emphasis>. See <link
+	  linkend="talloc_set_name"><quote>talloc_set_name()</quote></link>
+	  for details.
+	</para>
+      </refsect2>
+      <refsect2><title>void *talloc_init(const char *<emphasis role="italic">fmt</emphasis>, ...);</title>
+	<para>
+	  This function creates a zero-length named talloc context as a top
+	  level context. It is equivalent to:
+	</para>
+	<programlisting>talloc_named(NULL, 0, fmt, ...);</programlisting>
+      </refsect2>
+      <refsect2><title>void *talloc_new(void *<emphasis role="italic">ctx</emphasis>);</title>
+	<para>
+	  This is a utility macro that creates a new memory context hanging
+	  off an existing context, automatically naming it "talloc_new:
+	  __location__" where __location__ is the source line it is called
+	  from. It is particularly useful for creating a new temporary
+	  working context.
+	</para>
+      </refsect2>
+      <refsect2><title>(<emphasis role="italic">type</emphasis> *)talloc_realloc(const void *<emphasis role="italic">ctx</emphasis>, void *<emphasis role="italic">ptr</emphasis>, <emphasis role="italic">type</emphasis>, <emphasis role="italic">count</emphasis>);</title>
+	<para>
+	  The talloc_realloc() macro changes the size of a talloc pointer.
+	  It has the following equivalences:
+	</para>
+	<programlisting>talloc_realloc(ctx, NULL, type, 1) ==> talloc(ctx, type);
+talloc_realloc(ctx, ptr, type, 0) ==> talloc_free(ptr);</programlisting>
+	<para>
+	  The <emphasis role="italic">ctx</emphasis> argument is only used
+	  if <emphasis role="italic">ptr</emphasis> is not NULL, otherwise
+	  it is ignored.
+	</para>
+	<para>
+	  talloc_realloc() returns the new pointer, or NULL on failure.
+	  The call will fail either due to a lack of memory, or because the
+	  pointer has more than one parent (see <link
+	  linkend="talloc_reference"><quote>talloc_reference()</quote></link>).
+	</para>
+      </refsect2>
+      <refsect2><title>void *talloc_realloc_size(const void *ctx, void *ptr, size_t size);</title>
+	<para>
+	  The talloc_realloc_size() function is useful when the type is not
+	  known so the type-safe talloc_realloc() cannot be used.
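+	</para>
+	<para>
+	  As a brief sketch of typical usage (assuming an existing context
+	  <emphasis role="italic">ctx</emphasis> and an untyped buffer that
+	  needs to grow):
+	</para>
+	<programlisting>void *buf = talloc_size(ctx, 128);
+void *bigger = talloc_realloc_size(ctx, buf, 256);
+if (bigger != NULL) {
+	buf = bigger;
+}</programlisting>
+	<para>
+	  Assigning the result to a second variable first means the only
+	  pointer to the original buffer is not lost if the call fails.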
+	</para>
+      </refsect2>
+      <refsect2><title>TYPE *talloc_steal(const void *<emphasis role="italic">new_ctx</emphasis>, const TYPE *<emphasis role="italic">ptr</emphasis>);</title>
+	<para>
+	  The talloc_steal() function changes the parent context of a
+	  talloc pointer. It is typically used when the context that the
+	  pointer is currently a child of is going to be freed and you wish
+	  to keep the memory for a longer time.
+	</para>
+	<para>
+	  The talloc_steal() function returns the pointer that you pass it.
+	  It does not have any failure modes.
+	</para>
+	<para>
+	  It is possible to produce loops in the parent/child
+	  relationship if you are not careful with talloc_steal(). No
+	  guarantees are provided as to your sanity or the safety of your
+	  data if you do this.
+	</para>
+	<para>
+	  Note that if you try to call talloc_steal() on a pointer that has
+	  more than one parent then the result is ambiguous. Talloc will choose
+	  to remove the parent that is currently indicated by talloc_parent()
+	  and replace it with the chosen parent. You will also get a message
+	  like this via the talloc logging functions:
+	</para>
+	<para>
+	  <screen format="linespecific">
+WARNING: talloc_steal with references at some_dir/source/foo.c:123
+	reference at some_dir/source/other.c:325
+	reference at some_dir/source/third.c:121
+	  </screen>
+	</para>
+	<para>
+	  To unambiguously change the parent of a pointer, please see the
+	  function <link linkend="talloc_reference"><quote>talloc_reparent()</quote></link>. See
+	  the talloc_set_log_fn() documentation for more information
+	  on talloc logging.
+	</para>
+      </refsect2>
+      <refsect2><title>TYPE *talloc_reparent(const void *<emphasis role="italic">old_parent</emphasis>, const void *<emphasis role="italic">new_parent</emphasis>, const TYPE *<emphasis role="italic">ptr</emphasis>);</title>
+	<para>
+	  The talloc_reparent() function changes the parent context of a talloc
+	  pointer. It is typically used when the context that the pointer is
+	  currently a child of is going to be freed and you wish to keep the
+	  memory for a longer time.
+	</para>
+	<para>
+	  The talloc_reparent() function returns the pointer that you pass it. It
+	  does not have any failure modes.
+	</para>
+	<para>
+	  The difference between talloc_reparent() and talloc_steal() is that
+	  talloc_reparent() can specify which parent you wish to change. This is
+	  useful when a pointer has multiple parents via references.
+	</para>
+      </refsect2>
+      <refsect2><title>TYPE *talloc_move(const void *<emphasis role="italic">new_ctx</emphasis>, TYPE **<emphasis role="italic">ptr</emphasis>);</title>
+	<para>
+	  The talloc_move() function is a wrapper around
+	  talloc_steal() which zeros the source pointer after the
+	  move. This avoids a potential source of bugs where a
+	  programmer leaves a pointer in two structures, and uses the
+	  pointer from the old structure after it has been moved to a
+	  new one.
+	</para>
+      </refsect2>
+      <refsect2><title>size_t talloc_total_size(const void *<emphasis role="italic">ptr</emphasis>);</title>
+	<para>
+	  The talloc_total_size() function returns the total size in bytes
+	  used by this pointer and all child pointers. Mostly useful for
+	  debugging.
+	</para>
+	<para>
+	  Passing NULL is allowed, but it will only give a meaningful
+	  result if talloc_enable_leak_report() or
+	  talloc_enable_leak_report_full() has been called.
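+	</para>
+	<para>
+	  For example, a debugging sketch that prints the footprint of a
+	  context (assuming an existing context
+	  <emphasis role="italic">ctx</emphasis>):
+	</para>
+	<programlisting>printf("ctx holds %u bytes in %u blocks\n",
+       (unsigned)talloc_total_size(ctx),
+       (unsigned)talloc_total_blocks(ctx));</programlisting>
+	<para>
+	  The casts to unsigned keep the format string portable in this
+	  sketch; %zu could be used where C99 is available.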
+	</para>
+      </refsect2>
+      <refsect2><title>size_t talloc_total_blocks(const void *<emphasis role="italic">ptr</emphasis>);</title>
+	<para>
+	  The talloc_total_blocks() function returns the total memory block
+	  count used by this pointer and all child pointers. Mostly useful
+	  for debugging.
+	</para>
+	<para>
+	  Passing NULL is allowed, but it will only give a meaningful
+	  result if talloc_enable_leak_report() or
+	  talloc_enable_leak_report_full() has been called.
+	</para>
+      </refsect2>
+      <refsect2 id="talloc_report"><title>void talloc_report(const void *ptr, FILE *f);</title>
+	<para>
+	  The talloc_report() function prints a summary report of all
+	  memory used by <emphasis role="italic">ptr</emphasis>. One line
+	  of report is printed for each immediate child of ptr, showing the
+	  total memory and number of blocks used by that child.
+	</para>
+	<para>
+	  You can pass NULL for the pointer, in which case a report is
+	  printed for the top level memory context, but only if
+	  talloc_enable_leak_report() or talloc_enable_leak_report_full()
+	  has been called.
+	</para>
+      </refsect2>
+      <refsect2 id="talloc_report_full"><title>void talloc_report_full(const void *<emphasis role="italic">ptr</emphasis>, FILE *<emphasis role="italic">f</emphasis>);</title>
+	<para>
+	  This provides a more detailed report than talloc_report(). It
+	  will recursively print the entire tree of memory referenced by
+	  the pointer. References in the tree are shown by giving the name
+	  of the pointer that is referenced.
+	</para>
+	<para>
+	  You can pass NULL for the pointer, in which case a report is
+	  printed for the top level memory context, but only if
+	  talloc_enable_leak_report() or talloc_enable_leak_report_full()
+	  has been called.
+	</para>
+      </refsect2>
+      <refsect2 id="talloc_report_depth_cb">
+	<funcsynopsis><funcprototype>
+	  <funcdef>void <function>talloc_report_depth_cb</function></funcdef>
+	  <paramdef><parameter>const void *ptr</parameter></paramdef>
+	  <paramdef><parameter>int depth</parameter></paramdef>
+	  <paramdef><parameter>int max_depth</parameter></paramdef>
+	  <paramdef><parameter>void (*callback)(const void *ptr, int depth, int max_depth, int is_ref, void *priv)</parameter></paramdef>
+	  <paramdef><parameter>void *priv</parameter></paramdef>
+	</funcprototype></funcsynopsis>
+	<para>
+	  This provides a more flexible report than talloc_report(). It
+	  will recursively call the callback for the entire tree of memory
+	  referenced by the pointer. References in the tree are passed with
+	  <emphasis role="italic">is_ref = 1</emphasis> and the pointer that is referenced.
+	</para>
+	<para>
+	  You can pass NULL for the pointer, in which case a report is
+	  printed for the top level memory context, but only if
+	  talloc_enable_leak_report() or talloc_enable_leak_report_full()
+	  has been called.
+	</para>
+	<para>
+	  The recursion is stopped when depth >= max_depth.
+	  max_depth = -1 means only stop at leaf nodes.
+	</para>
+      </refsect2>
+      <refsect2 id="talloc_report_depth_file">
+	<funcsynopsis><funcprototype>
+	  <funcdef>void <function>talloc_report_depth_file</function></funcdef>
+	  <paramdef><parameter>const void *ptr</parameter></paramdef>
+	  <paramdef><parameter>int depth</parameter></paramdef>
+	  <paramdef><parameter>int max_depth</parameter></paramdef>
+	  <paramdef><parameter>FILE *f</parameter></paramdef>
+	</funcprototype></funcsynopsis>
+	<para>
+	  This provides a more flexible report than talloc_report(). It
+	  will let you specify the depth and max_depth.
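+	</para>
+	<para>
+	  For example, to dump only the top two levels of a large tree
+	  (a sketch, assuming an existing context
+	  <emphasis role="italic">ctx</emphasis>):
+	</para>
+	<programlisting>talloc_report_depth_file(ctx, 0, 2, stderr);</programlisting>
+	<para>
+	  Passing -1 as max_depth would instead report the whole tree, as
+	  talloc_report_full() does.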
+ </para> + </refsect2> + <refsect2 id="talloc_enable_leak_report"><title>void talloc_enable_leak_report(void);</title> + <para> + This enables calling of talloc_report(NULL, stderr) when the + program exits. In Samba4 this is enabled by using the + --leak-report command line option. + </para> + <para> + For it to be useful, this function must be called before any + other talloc function as it establishes a "null context" that + acts as the top of the tree. If you don't call this function + first then passing NULL to talloc_report() or + talloc_report_full() won't give you the full tree printout. + </para> + <para> + Here is a typical talloc report: + </para> + <screen format="linespecific">talloc report on 'null_context' (total 267 bytes in 15 blocks) +libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks +libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks +iconv(UTF8,CP850) contains 42 bytes in 2 blocks +libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks +iconv(CP850,UTF8) contains 42 bytes in 2 blocks +iconv(UTF8,UTF-16LE) contains 45 bytes in 2 blocks +iconv(UTF-16LE,UTF8) contains 45 bytes in 2 blocks + </screen> + </refsect2> + <refsect2 id="talloc_enable_leak_report_full"><title>void talloc_enable_leak_report_full(void);</title> + <para> + This enables calling of talloc_report_full(NULL, stderr) when the + program exits. In Samba4 this is enabled by using the + --leak-report-full command line option. + </para> + <para> + For it to be useful, this function must be called before any + other talloc function as it establishes a "null context" that + acts as the top of the tree. If you don't call this function + first then passing NULL to talloc_report() or + talloc_report_full() won't give you the full tree printout. + </para> + <para> + Here is a typical full report: + </para> + <screen format="linespecific">full talloc report on 'root' (total 18 bytes in 8 blocks) +p1 contains 18 bytes in 7 blocks (ref 0) + r1 contains 13 bytes in 2 blocks (ref 0) + reference to: p2 + p2 contains 1 bytes in 1 blocks (ref 1) + x3 contains 1 bytes in 1 blocks (ref 0) + x2 contains 1 bytes in 1 blocks (ref 0) + x1 contains 1 bytes in 1 blocks (ref 0) + </screen> + </refsect2> + <refsect2><title>(<emphasis role="italic">type</emphasis> *)talloc_zero(const void *<emphasis role="italic">ctx</emphasis>, <emphasis role="italic">type</emphasis>);</title> + <para> + The talloc_zero() macro is equivalent to: + </para> + <programlisting>ptr = talloc(ctx, type); +if (ptr) memset(ptr, 0, sizeof(type));</programlisting> + </refsect2> + <refsect2><title>void *talloc_zero_size(const void *<emphasis role="italic">ctx</emphasis>, size_t <emphasis role="italic">size</emphasis>)</title> + <para> + The talloc_zero_size() function is useful when you don't have a + known type. 
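+	</para>
+	<para>
+	  As a brief sketch (assuming an existing context
+	  <emphasis role="italic">ctx</emphasis> and a hypothetical
+	  struct foo):
+	</para>
+	<programlisting>struct foo *f = talloc_zero(ctx, struct foo); /* all members start at 0 */
+void *buf = talloc_zero_size(ctx, 128);       /* 128 zeroed bytes */</programlisting>
+	<para>
+	  Both return NULL on allocation failure, just like talloc() itself.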
+ </para> + </refsect2> + <refsect2><title>void *talloc_memdup(const void *<emphasis role="italic">ctx</emphasis>, const void *<emphasis role="italic">p</emphasis>, size_t size);</title> + <para> + The talloc_memdup() function is equivalent to: + </para> + <programlisting>ptr = talloc_size(ctx, size); +if (ptr) memcpy(ptr, p, size);</programlisting> + </refsect2> + <refsect2><title>char *talloc_strdup(const void *<emphasis role="italic">ctx</emphasis>, const char *<emphasis role="italic">p</emphasis>);</title> + <para> + The talloc_strdup() function is equivalent to: + </para> + <programlisting>ptr = talloc_size(ctx, strlen(p)+1); +if (ptr) memcpy(ptr, p, strlen(p)+1);</programlisting> + <para> + This function sets the name of the new pointer to the passed + string. This is equivalent to: + </para> + <programlisting>talloc_set_name_const(ptr, ptr)</programlisting> + </refsect2> + <refsect2><title>char *talloc_strndup(const void *<emphasis role="italic">t</emphasis>, const char *<emphasis role="italic">p</emphasis>, size_t <emphasis role="italic">n</emphasis>);</title> + <para> + The talloc_strndup() function is the talloc equivalent of the C + library function strndup(3). + </para> + <para> + This function sets the name of the new pointer to the passed + string. This is equivalent to: + </para> + <programlisting>talloc_set_name_const(ptr, ptr)</programlisting> + </refsect2> + <refsect2><title>char *talloc_vasprintf(const void *<emphasis role="italic">t</emphasis>, const char *<emphasis role="italic">fmt</emphasis>, va_list <emphasis role="italic">ap</emphasis>);</title> + <para> + The talloc_vasprintf() function is the talloc equivalent of the C + library function vasprintf(3). + </para> + <para> + This function sets the name of the new pointer to the new + string. This is equivalent to: + </para> + <programlisting>talloc_set_name_const(ptr, ptr)</programlisting> + </refsect2> + <refsect2><title>char *talloc_asprintf(const void *<emphasis role="italic">t</emphasis>, const char *<emphasis role="italic">fmt</emphasis>, ...);</title> + <para> + The talloc_asprintf() function is the talloc equivalent of the C + library function asprintf(3). + </para> + <para> + This function sets the name of the new pointer to the passed + string. This is equivalent to: + </para> + <programlisting>talloc_set_name_const(ptr, ptr)</programlisting> + </refsect2> + <refsect2><title>char *talloc_asprintf_append(char *s, const char *fmt, ...);</title> + <para> + The talloc_asprintf_append() function appends the given formatted + string to the given string. + </para> + <para> + This function sets the name of the new pointer to the new + string. This is equivalent to: + </para> + <programlisting>talloc_set_name_const(ptr, ptr)</programlisting> + </refsect2> + <refsect2><title>(type *)talloc_array(const void *ctx, type, unsigned int count);</title> + <para> + The talloc_array() macro is equivalent to: + </para> + <programlisting>(type *)talloc_size(ctx, sizeof(type) * count);</programlisting> + <para> + except that it provides integer overflow protection for the + multiply, returning NULL if the multiply overflows. + </para> + </refsect2> + <refsect2><title>void *talloc_array_size(const void *ctx, size_t size, unsigned int count);</title> + <para> + The talloc_array_size() function is useful when the type is not + known. It operates in the same way as talloc_array(), but takes a + size instead of a type. 
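+	</para>
+	<para>
+	  As a brief sketch (elem_size and n are assumed to come from
+	  run-time data):
+	</para>
+	<programlisting>void *vals = talloc_array_size(ctx, elem_size, n);
+if (vals == NULL) {
+	/* allocation failed, or elem_size * n overflowed */
+}</programlisting>
+	<para>
+	  Like talloc_array(), it returns NULL if the size multiplication
+	  overflows.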
+	</para>
+      </refsect2>
+      <refsect2><title>(typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, unsigned int count);</title>
+	<para>
+	  The talloc_array_ptrtype() macro should be used when you have a pointer to an array
+	  and want to allocate memory of an array to point at with this pointer. When compiling
+	  with gcc >= 3 it is typesafe. Note that this is a wrapper of talloc_array_size(),
+	  so talloc_get_name() will return the current location in the source file,
+	  and not the type.
+	</para>
+      </refsect2>
+      <refsect2><title>void *talloc_realloc_fn(const void *ctx, void *ptr, size_t size)</title>
+	<para>
+	  This is a non-macro version of talloc_realloc(), which is useful
+	  as libraries sometimes want a realloc function pointer. A
+	  realloc(3) implementation encapsulates the functionality of
+	  malloc(3), free(3) and realloc(3) in one call, which is why it is
+	  useful to be able to pass around a single function pointer.
+	</para>
+      </refsect2>
+      <refsect2><title>void *talloc_autofree_context(void);</title>
+	<para>
+	  This is a handy utility function that returns a talloc context
+	  which will be automatically freed on program exit. This can be
+	  used to reduce the noise in memory leak reports.
+	</para>
+      </refsect2>
+      <refsect2><title>void *talloc_check_name(const void *ptr, const char *name);</title>
+	<para>
+	  This function checks if a pointer has the specified <emphasis
+	  role="italic">name</emphasis>. If it does then the pointer is
+	  returned. If it doesn't, then NULL is returned.
+	</para>
+      </refsect2>
+      <refsect2><title>(type *)talloc_get_type(const void *ptr, type);</title>
+	<para>
+	  This macro allows you to do type checking on talloc pointers. It
+	  is particularly useful for void* private pointers. It is
+	  equivalent to this:
+	</para>
+	<programlisting>(type *)talloc_check_name(ptr, #type)</programlisting>
+      </refsect2>
+      <refsect2><title>talloc_set_type(const void *ptr, type);</title>
+	<para>
+	  This macro allows you to force the name of a pointer to be a
+	  particular <emphasis>type</emphasis>. This can be
+	  used in conjunction with talloc_get_type() to do type checking on
+	  void* pointers.
+	</para>
+	<para>
+	  It is equivalent to this:
+	</para>
+	<programlisting>talloc_set_name_const(ptr, #type)</programlisting>
+      </refsect2>
+      <refsect2><title>talloc_set_log_fn(void (*log_fn)(const char *message));</title>
+	<para>
+	  This function sets a logging function that talloc will use for
+	  warnings and errors. By default talloc will not print any warnings or
+	  errors.
+	</para>
+      </refsect2>
+      <refsect2><title>talloc_set_log_stderr(void);</title>
+	<para>
+	  This sets the talloc log function to write log messages to stderr.
+	</para>
+      </refsect2>
+  </refsect1>
+  <refsect1><title>PERFORMANCE</title>
+    <para>
+      All the additional features of talloc(3) over malloc(3) do come at a
+      price. We have a simple performance test in Samba4 that measures
+      talloc() versus malloc() performance, and it seems that talloc() is
+      about 10% slower than malloc() on my x86 Debian Linux box. For
+      Samba, the great reduction in code complexity that we get by using
+      talloc makes this worthwhile, especially as the total overhead of
+      talloc/malloc in Samba is already quite small.
+    </para>
+  </refsect1>
+  <refsect1><title>SEE ALSO</title>
+    <para>
+      malloc(3), strndup(3), vasprintf(3), asprintf(3),
+      <ulink url="http://talloc.samba.org/"/>
+    </para>
+  </refsect1>
+
+  <refsect1><title>AUTHOR</title>
+    <para> The original Samba software and related utilities were
+      created by Andrew Tridgell.
Samba is now developed by the + Samba Team as an Open Source project similar to the way the + Linux kernel is developed. + </para> + </refsect1> + + <refsect1><title>COPYRIGHT/LICENSE</title> + <para> + Copyright (C) Andrew Tridgell 2004 + </para> + <para> + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU Lesser General Public License as + published by the Free Software Foundation; either version 3 of the + License, or (at your option) any later version. + </para> + <para> + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + </para> + <para> + You should have received a copy of the GNU General Public License + along with this program; if not, see http://www.gnu.org/licenses/. + </para> + </refsect1> +</refentry> diff --git a/pytalloc-util.pc.in b/pytalloc-util.pc.in new file mode 100644 index 0000000..06f83e2 --- /dev/null +++ b/pytalloc-util.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: pytalloc-util@PYTHON_SO_ABI_FLAG@ +Description: Utility functions for using talloc objects with Python +Version: @TALLOC_VERSION@ +Libs: @LIB_RPATH@ -L${libdir} -lpytalloc-util@PYTHON_LIBNAME_SO_ABI_FLAG@ +Cflags: -I${includedir} +URL: http://talloc.samba.org/ diff --git a/pytalloc.c b/pytalloc.c new file mode 100644 index 0000000..f6f0681 --- /dev/null +++ b/pytalloc.c @@ -0,0 +1,269 @@ +/* + Unix SMB/CIFS implementation. + Python Talloc Module + Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2010-2011 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include "lib/replace/system/python.h" +#include <talloc.h> +#include <pytalloc.h> +#include "pytalloc_private.h" + +static PyTypeObject TallocObject_Type; + +/* print a talloc tree report for a talloc python object */ +static PyObject *pytalloc_report_full(PyObject *self, PyObject *args) +{ + PyObject *py_obj = Py_None; + + if (!PyArg_ParseTuple(args, "|O", &py_obj)) + return NULL; + + if (py_obj == Py_None) { + talloc_report_full(NULL, stdout); + } else { + talloc_report_full(pytalloc_get_mem_ctx(py_obj), stdout); + } + Py_RETURN_NONE; +} + +/* enable null tracking */ +static PyObject *pytalloc_enable_null_tracking(PyObject *self, + PyObject *Py_UNUSED(ignored)) +{ + talloc_enable_null_tracking(); + Py_RETURN_NONE; +} + +/* return the number of talloc blocks */ +static PyObject *pytalloc_total_blocks(PyObject *self, PyObject *args) +{ + PyObject *py_obj = Py_None; + + if (!PyArg_ParseTuple(args, "|O", &py_obj)) + return NULL; + + if (py_obj == Py_None) { + return PyLong_FromLong(talloc_total_blocks(NULL)); + } + + return PyLong_FromLong(talloc_total_blocks(pytalloc_get_mem_ctx(py_obj))); +} + +static PyMethodDef talloc_methods[] = { + { "report_full", (PyCFunction)pytalloc_report_full, METH_VARARGS, + "show a talloc tree for an object"}, + { "enable_null_tracking", (PyCFunction)pytalloc_enable_null_tracking, METH_NOARGS, + "enable tracking of the NULL object"}, + { "total_blocks", (PyCFunction)pytalloc_total_blocks, METH_VARARGS, + "return talloc block count"}, + {0} +}; + +/** + * Default (but only slightly more useful than the default) implementation of Repr(). + */ +static PyObject *pytalloc_default_repr(PyObject *obj) +{ + pytalloc_Object *talloc_obj = (pytalloc_Object *)obj; + PyTypeObject *type = (PyTypeObject*)PyObject_Type(obj); + + return PyUnicode_FromFormat("<%s talloc object at %p>", + type->tp_name, talloc_obj->ptr); +} + +/** + * Simple dealloc for talloc-wrapping PyObjects + */ +static void pytalloc_dealloc(PyObject* self) +{ + pytalloc_Object *obj = (pytalloc_Object *)self; + assert(talloc_unlink(NULL, obj->talloc_ctx) != -1); + obj->talloc_ctx = NULL; + self->ob_type->tp_free(self); +} + +/** + * Default objects do not support ordered comparisons, but talloc + * objects do, sorting by pointers clustered by type. 
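+ *
+ * As a consequence, two distinct Python wrappers around the same talloc
+ * pointer compare equal, and objects wrapping different talloc types get
+ * a stable (if arbitrary) ordering based on their type pointers.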
+ */ +static PyObject *pytalloc_default_richcmp(PyObject *obj1, PyObject *obj2, int op) +{ + void *ptr1; + void *ptr2; + if (Py_TYPE(obj1) == Py_TYPE(obj2)) { + /* When types match, compare pointers */ + ptr1 = pytalloc_get_ptr(obj1); + ptr2 = pytalloc_get_ptr(obj2); + } else if (PyObject_TypeCheck(obj2, &TallocObject_Type)) { + /* Otherwise, compare types */ + ptr1 = Py_TYPE(obj1); + ptr2 = Py_TYPE(obj2); + } else { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + switch (op) { + case Py_EQ: return PyBool_FromLong(ptr1 == ptr2); + case Py_NE: return PyBool_FromLong(ptr1 != ptr2); + case Py_LT: return PyBool_FromLong(ptr1 < ptr2); + case Py_GT: return PyBool_FromLong(ptr1 > ptr2); + case Py_LE: return PyBool_FromLong(ptr1 <= ptr2); + case Py_GE: return PyBool_FromLong(ptr1 >= ptr2); + } + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; +} + +static PyTypeObject TallocObject_Type = { + .tp_name = "talloc.Object", + .tp_doc = "Python wrapper for a talloc-maintained object.", + .tp_basicsize = sizeof(pytalloc_Object), + .tp_dealloc = (destructor)pytalloc_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_repr = pytalloc_default_repr, + .tp_richcompare = pytalloc_default_richcmp, +}; + +/** + * Default (but only slightly more useful than the default) implementation of Repr(). + */ +static PyObject *pytalloc_base_default_repr(PyObject *obj) +{ + pytalloc_BaseObject *talloc_obj = (pytalloc_BaseObject *)obj; + PyTypeObject *type = (PyTypeObject*)PyObject_Type(obj); + + return PyUnicode_FromFormat("<%s talloc based object at %p>", + type->tp_name, talloc_obj->ptr); +} + +/** + * Simple dealloc for talloc-wrapping PyObjects + */ +static void pytalloc_base_dealloc(PyObject* self) +{ + pytalloc_BaseObject *obj = (pytalloc_BaseObject *)self; + assert(talloc_unlink(NULL, obj->talloc_ctx) != -1); + obj->talloc_ctx = NULL; + self->ob_type->tp_free(self); +} + +/** + * Default objects do not support ordered comparisons, but talloc + * objects do, sorting by pointers clustered by type. 
+ */ +static PyObject *pytalloc_base_default_richcmp(PyObject *obj1, PyObject *obj2, int op) +{ + void *ptr1; + void *ptr2; + if (Py_TYPE(obj1) == Py_TYPE(obj2)) { + /* When types match, compare pointers */ + ptr1 = pytalloc_get_ptr(obj1); + ptr2 = pytalloc_get_ptr(obj2); + } else if (PyObject_TypeCheck(obj2, &TallocObject_Type)) { + /* Otherwise, compare types */ + ptr1 = Py_TYPE(obj1); + ptr2 = Py_TYPE(obj2); + } else { + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; + } + switch (op) { + case Py_EQ: return PyBool_FromLong(ptr1 == ptr2); + case Py_NE: return PyBool_FromLong(ptr1 != ptr2); + case Py_LT: return PyBool_FromLong(ptr1 < ptr2); + case Py_GT: return PyBool_FromLong(ptr1 > ptr2); + case Py_LE: return PyBool_FromLong(ptr1 <= ptr2); + case Py_GE: return PyBool_FromLong(ptr1 >= ptr2); + } + Py_INCREF(Py_NotImplemented); + return Py_NotImplemented; +} + +static PyTypeObject TallocBaseObject_Type = { + .tp_name = "talloc.BaseObject", + .tp_doc = "Python wrapper for a talloc-maintained object.", + .tp_basicsize = sizeof(pytalloc_BaseObject), + .tp_dealloc = (destructor)pytalloc_base_dealloc, + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_repr = pytalloc_base_default_repr, + .tp_richcompare = pytalloc_base_default_richcmp, +}; + +static PyTypeObject TallocGenericObject_Type = { + .tp_name = "talloc.GenericObject", + .tp_doc = "Python wrapper for a talloc-maintained object.", + .tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + .tp_base = &TallocBaseObject_Type, + .tp_basicsize = sizeof(pytalloc_BaseObject), +}; + +#define MODULE_DOC PyDoc_STR("Python wrapping of talloc-maintained objects.") + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + .m_name = "talloc", + .m_doc = MODULE_DOC, + .m_size = -1, + .m_methods = talloc_methods, +}; + +static PyObject *module_init(void); +static PyObject *module_init(void) +{ + PyObject *m; + + if (PyType_Ready(&TallocObject_Type) < 0) + return NULL; + + if (PyType_Ready(&TallocBaseObject_Type) < 0) + return NULL; + + if (PyType_Ready(&TallocGenericObject_Type) < 0) + return NULL; + + m = PyModule_Create(&moduledef); + if (m == NULL) + return NULL; + + Py_INCREF(&TallocObject_Type); + if (PyModule_AddObject(m, "Object", (PyObject *)&TallocObject_Type)) { + goto err; + } + Py_INCREF(&TallocBaseObject_Type); + if (PyModule_AddObject(m, "BaseObject", (PyObject *)&TallocBaseObject_Type)) { + goto err; + } + Py_INCREF(&TallocGenericObject_Type); + if (PyModule_AddObject(m, "GenericObject", (PyObject *)&TallocGenericObject_Type)) { + goto err; + } + return m; + +err: + Py_DECREF(m); + return NULL; +} + +PyMODINIT_FUNC PyInit_talloc(void); +PyMODINIT_FUNC PyInit_talloc(void) +{ + return module_init(); +} diff --git a/pytalloc.h b/pytalloc.h new file mode 100644 index 0000000..c1f9b44 --- /dev/null +++ b/pytalloc.h @@ -0,0 +1,87 @@ +/* + Unix SMB/CIFS implementation. + Samba utility functions + Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. 
+ + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#ifndef _PYTALLOC_H_ +#define _PYTALLOC_H_ + +#include <Python.h> +#include <talloc.h> + +typedef struct { + PyObject_HEAD + TALLOC_CTX *talloc_ctx; + void *ptr; /* eg the array element */ +} pytalloc_Object; + +/* Return the PyTypeObject for pytalloc_Object. Returns a borrowed reference. */ +_PUBLIC_ PyTypeObject *pytalloc_GetObjectType(void); + +/* Return the PyTypeObject for pytalloc_BaseObject. Returns a borrowed reference. */ +_PUBLIC_ PyTypeObject *pytalloc_GetBaseObjectType(void); + +/* Check whether a specific object is a talloc Object. */ +_PUBLIC_ int pytalloc_Check(PyObject *); + +_PUBLIC_ int pytalloc_BaseObject_check(PyObject *); + +_PUBLIC_ int _pytalloc_check_type(PyObject *py_obj, const char *type_name); +#define pytalloc_check_type(py_obj, type) \ + _pytalloc_check_type((PyObject *)(py_obj), #type) + +/* Retrieve the pointer for a pytalloc_object. Like talloc_get_type() + * but for pytalloc_Objects. */ +_PUBLIC_ void *_pytalloc_get_type(PyObject *py_obj, const char *type_name); +#define pytalloc_get_type(py_obj, type) ((type *)_pytalloc_get_type((PyObject *)(py_obj), #type)) + +_PUBLIC_ void *_pytalloc_get_ptr(PyObject *py_obj); +#define pytalloc_get_ptr(py_obj) _pytalloc_get_ptr((PyObject *)(py_obj)) +_PUBLIC_ TALLOC_CTX *_pytalloc_get_mem_ctx(PyObject *py_obj); +#define pytalloc_get_mem_ctx(py_obj) _pytalloc_get_mem_ctx((PyObject *)(py_obj)) + +_PUBLIC_ const char *_pytalloc_get_name(PyObject *py_obj); +#define pytalloc_get_name(py_obj) _pytalloc_get_name((PyObject *)(py_obj)) + + +_PUBLIC_ PyObject *pytalloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr); +_PUBLIC_ PyObject *pytalloc_steal(PyTypeObject *py_type, void *ptr); +_PUBLIC_ PyObject *pytalloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr); +#define pytalloc_reference(py_type, talloc_ptr) pytalloc_reference_ex(py_type, talloc_ptr, talloc_ptr) + +#define pytalloc_new(type, typeobj) pytalloc_steal(typeobj, talloc_zero(NULL, type)) + +/* + * Wrap a generic talloc pointer into a talloc.GenericObject, + * this is a subclass of talloc.BaseObject. + */ +_PUBLIC_ PyObject *pytalloc_GenericObject_steal_ex(TALLOC_CTX *mem_ctx, void *ptr); +#define pytalloc_GenericObject_steal(talloc_ptr) \ + pytalloc_GenericObject_steal_ex(talloc_ptr, talloc_ptr) +_PUBLIC_ PyObject *pytalloc_GenericObject_reference_ex(TALLOC_CTX *mem_ctx, void *ptr); +#define pytalloc_GenericObject_reference(talloc_ptr) \ + pytalloc_GenericObject_reference_ex(talloc_ptr, talloc_ptr) + +_PUBLIC_ size_t pytalloc_BaseObject_size(void); + +_PUBLIC_ int pytalloc_BaseObject_PyType_Ready(PyTypeObject *type); + +#endif /* _PYTALLOC_H_ */ diff --git a/pytalloc_guide.txt b/pytalloc_guide.txt new file mode 100644 index 0000000..85705de --- /dev/null +++ b/pytalloc_guide.txt @@ -0,0 +1,252 @@ +Using talloc in Samba4 +====================== + +.. contents:: + +Jelmer Vernooij +August 2013 + +The most current version of this document is available at + http://samba.org/ftp/unpacked/talloc/pytalloc_guide.txt + +pytalloc is a small library that provides glue for wrapping +talloc-allocated objects from C in Python objects. 
+
+What is pytalloc, and what is it not?
+-------------------------------------
+
+pytalloc is merely a helper library - it provides a convenient base type object
+for objects that wrap talloc-maintained memory in C. It won't write your
+bindings for you but it will make it easier to write C bindings that involve
+talloc, and take away some of the boilerplate.
+
+Python 3
+--------
+
+pytalloc can be used with Python 3. Usage from Python extensions remains
+the same, but for the C utilities, the library to link to is tagged with
+Python's PEP3149 ABI tag, for example "pytalloc.cpython34m".
+To make a build for Python 3, configure with PYTHON=/usr/bin/python3.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+pytalloc_Object / pytalloc_BaseObject
+
+This is the new base class that all Python objects that wrap talloc pointers
+derive from. It is itself a subclass of the "Object" type that all objects
+in Python derive from.
+
+Note that you will almost never create objects of the pytalloc_Object type
+itself, as they are just opaque pointers that cannot be accessed from
+Python. A common pattern is other objects that subclass pytalloc_Object and
+rely on it for their memory management.
+
+Each `pytalloc_Object` wraps two pieces of information - a talloc context
+and a pointer. The pointer is the actual data that is wrapped. The talloc
+context is used for memory management purposes only; when the wrapping Python object
+goes away, it unlinks the talloc context. The talloc context pointer and the ptr
+can (and often do) have the same value.
+
+Each pytalloc_Object has a custom __repr__ implementation that
+describes that it is a talloc object and the location of the
+pointer it is wrapping. It also has a custom __cmp__/__eq__/__neq__ method that
+compares the pointers the object is wrapping rather than the objects
+themselves (since there can be multiple objects that wrap the same talloc
+pointer).
+
+It is preferred to use pytalloc_BaseObject as this implementation
+exposes less in the C ABI and correctly supports pointers in C arrays
+in the way needed by PIDL.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyTypeObject *pytalloc_GetObjectType(void)
+
+Obtain a pointer to the PyTypeObject for `pytalloc_Object`. The
+reference counter for the object will NOT be incremented, so the
+caller MUST NOT decrement it when it no longer needs it (eg by using
+`Py_DECREF`).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyTypeObject *pytalloc_GetBaseObjectType(void)
+
+Obtain a pointer to the PyTypeObject for `pytalloc_BaseObject`. The
+reference counter for the object will NOT be incremented, so the
+caller MUST NOT decrement it when it no longer needs it (eg by using
+`Py_DECREF`).
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+int pytalloc_BaseObject_PyType_Ready(PyTypeObject *type);
+
+Wrapper for PyType_Ready() that will set the correct values into
+the PyTypeObject to create a BaseObject.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+int pytalloc_Check(PyObject *)
+
+Check whether a specific object is a talloc Object. Returns non-zero if it is
+a pytalloc_Object and zero otherwise.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+int pytalloc_BaseObject_check(PyObject *)
+
+Check whether a specific object is a talloc BaseObject. Returns non-zero if it is
+a pytalloc_BaseObject and zero otherwise.
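+
+As a rough sketch (py_obj being a PyObject received from Python code),
+these checks can be used to reject foreign objects early:
+
+  if (!pytalloc_BaseObject_check(py_obj)) {
+          PyErr_SetString(PyExc_TypeError,
+                          "expected a talloc-backed object");
+          return NULL;
+  }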
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+int pytalloc_check_type(PyObject *py_obj, type)
+
+Check whether py_obj is based on a `pytalloc_*Object`. type should be a
+C type, similar to a type passed to `talloc_get_type`.
+This can be used as a check before using pytalloc_get_type(),
+or to select an alternative codepath. Returns non-zero if it is
+an object of the expected type and zero otherwise.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+type *pytalloc_get_type(PyObject *py_obj, type)
+
+Retrieve the pointer from a `pytalloc_Object` py_obj. type should be a
+C type, similar to a type passed to `talloc_get_type`.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+pytalloc_get_ptr(PyObject *py_obj)
+
+Retrieve the pointer from a `pytalloc_Object` or `pytalloc_BaseObject`
+py_obj. There is no type checking - use `pytalloc_get_type` if
+possible.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+TALLOC_CTX *pytalloc_get_mem_ctx(PyObject *py_obj)
+
+Retrieve the talloc context associated with a pytalloc_Object or pytalloc_BaseObject.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr)
+
+Create a new Python wrapping object for a talloc pointer and context, with
+py_type as the associated Python subtype object. This is typically used
+when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element.
+`pytalloc_get_ptr()` can be used to get the pointer out of the object again.
+
+This will *not* increment the reference counter for the talloc context,
+so the caller should make sure such an increment has happened. When the Python
+object goes away, it will unreference the talloc context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_steal(PyTypeObject *py_type, void *ptr)
+
+Create a new Python wrapping object for a talloc pointer and context, with
+py_type as the associated Python subtype object. The pointer will also be used
+as the talloc context. `pytalloc_get_type()` can be used to get
+the pointer out of the object again.
+
+This will *not* increment the reference counter for the talloc context,
+so the caller should make sure such an increment has happened. When the Python
+object goes away, it will unreference the talloc context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_reference_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, void *ptr)
+
+Create a new Python wrapping object for a talloc pointer and context, with
+py_type as the associated Python subtype object. This is typically used
+when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element.
+`pytalloc_get_ptr()` can be used to get the pointer out of the object again.
+
+This will increment the reference counter for the talloc context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_reference(PyTypeObject *py_type, void *talloc_ptr)
+
+Create a new Python wrapping object for a talloc pointer, with
+py_type as the associated Python subtype object. The pointer will also be used
+as the talloc context. `pytalloc_get_type()` can be used to get
+the pointer out of the object again.
+
+This will increment the reference counter for the talloc context.
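+
+As a rough sketch of the ownership difference (Foo_Type is a hypothetical
+PyTypeObject pointer for a subtype of talloc.Object, and g an existing
+talloc pointer owned elsewhere):
+
+  /* freshly allocated: hand ownership to Python */
+  struct foo *f = talloc_zero(NULL, struct foo);
+  PyObject *py_f = pytalloc_steal(Foo_Type, f);
+
+  /* owned elsewhere: keep it alive via an extra reference */
+  PyObject *py_g = pytalloc_reference(Foo_Type, g);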
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_new(type, PyTypeObject *typeobj)
+
+Create a new, empty pytalloc_Object with the specified Python type object. type
+should be a C type, similar to talloc_new().
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_GenericObject_steal_ex(TALLOC_CTX *mem_ctx, void *ptr)
+
+Create a new Python wrapping object for a generic talloc pointer,
+as a subtype of `pytalloc_BaseObject`. This is typically used
+when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element.
+`pytalloc_get_ptr()` can be used to get the pointer out of the object again.
+
+This will *not* increment the reference counter for the talloc context,
+so the caller should make sure such an increment has happened. When the Python
+object goes away, it will unreference the talloc context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_GenericObject_steal(void *ptr)
+
+Create a new Python wrapping object for a generic talloc pointer,
+as a subtype of `pytalloc_BaseObject`. The pointer will also be used
+as the talloc context. `pytalloc_get_type()` can be used to get
+the pointer out of the object again.
+
+This will *not* increment the reference counter for the talloc context,
+so the caller should make sure such an increment has happened. When the Python
+object goes away, it will unreference the talloc context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_GenericObject_reference_ex(TALLOC_CTX *mem_ctx, void *ptr)
+
+Create a new Python wrapping object for a generic talloc pointer,
+as a subtype of `pytalloc_BaseObject`. This is typically used
+when `mem_ctx` and `ptr` differ, e.g. a pointer to an array element.
+`pytalloc_get_ptr()` can be used to get the pointer out of the object again.
+
+This will increment the reference counter for the talloc context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+PyObject *pytalloc_GenericObject_reference(void *ptr)
+
+Create a new Python wrapping object for a generic talloc pointer,
+as a subtype of `pytalloc_BaseObject`. The pointer will also be used
+as the talloc context. `pytalloc_get_type()` can be used to get
+the pointer out of the object again.
+
+This will increment the reference counter for the talloc context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+DEPRECATED! PyObject *pytalloc_CObject_FromTallocPtr(void *);
+
+Create a new pytalloc_Object for an arbitrary talloc-maintained C pointer. This will
+use a generic VoidPtr Python type, which just provides an opaque object in
+Python. The caller is responsible for incrementing the talloc reference count before calling
+this function - it will dereference the talloc pointer when it is garbage collected.
+
+This function is deprecated and only available on Python 2.
+Use pytalloc_GenericObject_{reference,steal}[_ex]() instead.
+
+Debug functions for talloc in Python
+------------------------------------
+
+The "talloc" module in Python provides a couple of functions that can be used
+to debug issues with objects wrapped by pytalloc.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+report_full(obj?)
+
+Print a full report on a specific object or on all objects allocated by Python.
+Same behaviour as the `talloc_report_full()` function that is provided by
+C talloc.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+enable_null_tracking()
+
+This enables tracking of the NULL memory context without enabling leak
+reporting on exit. Useful when you want to do your own leak
+reporting call via talloc_report_null_full().
+
+This must be done in the top level script, not in an imported module.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+total_blocks(obj?)
+
+Return the talloc block count for all allocated objects or a specific object if
+specified.
diff --git a/pytalloc_private.h b/pytalloc_private.h
new file mode 100644
index 0000000..b23cdfc
--- /dev/null
+++ b/pytalloc_private.h
@@ -0,0 +1,26 @@
+/*
+   Unix SMB/CIFS implementation.
+   Samba utility functions
+   Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+   Copyright (C) Andrew Bartlett <abartlet@samba.org> 2016
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 3 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program.  If not, see <http://www.gnu.org/licenses/>.
+*/
+
+typedef struct {
+	PyObject_HEAD
+	TALLOC_CTX *talloc_ctx;
+	TALLOC_CTX *talloc_ptr_ctx; /* eg the start of the array */
+	void *ptr; /* eg the array element */
+} pytalloc_BaseObject;
diff --git a/pytalloc_util.c b/pytalloc_util.c
new file mode 100644
index 0000000..766938a
--- /dev/null
+++ b/pytalloc_util.c
@@ -0,0 +1,334 @@
+/*
+   Unix SMB/CIFS implementation.
+   Python/Talloc glue
+   Copyright (C) Jelmer Vernooij <jelmer@samba.org> 2008
+
+     ** NOTE! The following LGPL license applies to the talloc
+     ** library. This does NOT imply that all of Samba is released
+     ** under the LGPL
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 3 of the License, or (at your option) any later version.
+
+   This library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/ + +#include "lib/replace/system/python.h" +#include "replace.h" +#include <talloc.h> +#include "pytalloc.h" +#include <assert.h> +#include "pytalloc_private.h" + +static PyObject *pytalloc_steal_or_reference(PyTypeObject *py_type, + TALLOC_CTX *mem_ctx, void *ptr, bool steal); + +_PUBLIC_ PyTypeObject *pytalloc_GetObjectType(void) +{ + static PyTypeObject *type = NULL; + PyObject *mod; + + mod = PyImport_ImportModule("talloc"); + if (mod == NULL) { + return NULL; + } + + type = (PyTypeObject *)PyObject_GetAttrString(mod, "Object"); + Py_DECREF(mod); + + return type; +} + +_PUBLIC_ PyTypeObject *pytalloc_GetBaseObjectType(void) +{ + static PyTypeObject *type = NULL; + PyObject *mod; + + mod = PyImport_ImportModule("talloc"); + if (mod == NULL) { + return NULL; + } + + type = (PyTypeObject *)PyObject_GetAttrString(mod, "BaseObject"); + Py_DECREF(mod); + + return type; +} + +static PyTypeObject *pytalloc_GetGenericObjectType(void) +{ + static PyTypeObject *type = NULL; + PyObject *mod; + + mod = PyImport_ImportModule("talloc"); + if (mod == NULL) { + return NULL; + } + + type = (PyTypeObject *)PyObject_GetAttrString(mod, "GenericObject"); + Py_DECREF(mod); + + return type; +} + +/** + * Import an existing talloc pointer into a Python object. + */ +_PUBLIC_ PyObject *pytalloc_steal_ex(PyTypeObject *py_type, TALLOC_CTX *mem_ctx, + void *ptr) +{ + return pytalloc_steal_or_reference(py_type, mem_ctx, ptr, true); +} + +/** + * Import an existing talloc pointer into a Python object. + */ +_PUBLIC_ PyObject *pytalloc_steal(PyTypeObject *py_type, void *ptr) +{ + return pytalloc_steal_or_reference(py_type, ptr, ptr, true); +} + + +/** + * Import an existing talloc pointer into a Python object, leaving the + * original parent, and creating a reference to the object in the python + * object. + * + * We remember the object we hold the reference to (a + * possibly-non-talloc pointer), the existing parent (typically the + * start of the array) and the new referenced parent. That way we can + * cope with the fact that we will have multiple parents, one per time + * python sees the object. + */ +_PUBLIC_ PyObject *pytalloc_reference_ex(PyTypeObject *py_type, + TALLOC_CTX *mem_ctx, void *ptr) +{ + return pytalloc_steal_or_reference(py_type, mem_ctx, ptr, false); +} + + +/** + * Internal function that either steals or references the talloc + * pointer into a new talloc context. 
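+ *
+ * A fresh talloc context is created to own (steal) or co-own (reference)
+ * mem_ctx; on failure NULL is returned with a Python exception set.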
+ */ +static PyObject *pytalloc_steal_or_reference(PyTypeObject *py_type, + TALLOC_CTX *mem_ctx, void *ptr, bool steal) +{ + bool ok = false; + TALLOC_CTX *talloc_ctx = NULL; + bool is_baseobject = false; + PyObject *obj = NULL; + PyTypeObject *BaseObjectType = NULL, *ObjectType = NULL; + + BaseObjectType = pytalloc_GetBaseObjectType(); + if (BaseObjectType == NULL) { + goto err; + } + ObjectType = pytalloc_GetObjectType(); + if (ObjectType == NULL) { + goto err; + } + + /* this should have been tested by caller */ + if (mem_ctx == NULL) { + return PyErr_NoMemory(); + } + + is_baseobject = PyType_IsSubtype(py_type, BaseObjectType); + if (!is_baseobject) { + if (!PyType_IsSubtype(py_type, ObjectType)) { + PyErr_SetString(PyExc_TypeError, + "Expected type based on talloc"); + return NULL; + } + } + + obj = py_type->tp_alloc(py_type, 0); + if (obj == NULL) { + goto err; + } + + talloc_ctx = talloc_new(NULL); + if (talloc_ctx == NULL) { + PyErr_NoMemory(); + goto err; + } + + if (steal) { + ok = (talloc_steal(talloc_ctx, mem_ctx) != NULL); + } else { + ok = (talloc_reference(talloc_ctx, mem_ctx) != NULL); + } + if (!ok) { + goto err; + } + talloc_set_name_const(talloc_ctx, py_type->tp_name); + + if (is_baseobject) { + pytalloc_BaseObject *ret = (pytalloc_BaseObject*)obj; + ret->talloc_ctx = talloc_ctx; + ret->talloc_ptr_ctx = mem_ctx; + ret->ptr = ptr; + } else { + pytalloc_Object *ret = (pytalloc_Object*)obj; + ret->talloc_ctx = talloc_ctx; + ret->ptr = ptr; + } + return obj; + +err: + TALLOC_FREE(talloc_ctx); + Py_XDECREF(obj); + return NULL; +} + +/* + * Wrap a generic talloc pointer into a talloc.GenericObject, + * this is a subclass of talloc.BaseObject. + */ +_PUBLIC_ PyObject *pytalloc_GenericObject_steal_ex(TALLOC_CTX *mem_ctx, void *ptr) +{ + PyTypeObject *tp = pytalloc_GetGenericObjectType(); + return pytalloc_steal_ex(tp, mem_ctx, ptr); +} + +/* + * Wrap a generic talloc pointer into a talloc.GenericObject, + * this is a subclass of talloc.BaseObject. 
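+ *
+ * Unlike the steal variant above, this keeps the caller's parent of the
+ * pointer and adds a talloc reference for the lifetime of the Python object.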
+ */ +_PUBLIC_ PyObject *pytalloc_GenericObject_reference_ex(TALLOC_CTX *mem_ctx, void *ptr) +{ + PyTypeObject *tp = pytalloc_GetGenericObjectType(); + return pytalloc_reference_ex(tp, mem_ctx, ptr); +} + +_PUBLIC_ int pytalloc_Check(PyObject *obj) +{ + PyTypeObject *tp = pytalloc_GetObjectType(); + + return PyObject_TypeCheck(obj, tp); +} + +_PUBLIC_ int pytalloc_BaseObject_check(PyObject *obj) +{ + PyTypeObject *tp = pytalloc_GetBaseObjectType(); + + return PyObject_TypeCheck(obj, tp); +} + +_PUBLIC_ size_t pytalloc_BaseObject_size(void) +{ + return sizeof(pytalloc_BaseObject); +} + +static void *_pytalloc_get_checked_type(PyObject *py_obj, const char *type_name, + bool check_only, const char *function) +{ + TALLOC_CTX *mem_ctx; + void *ptr = NULL; + void *type_obj; + + mem_ctx = _pytalloc_get_mem_ctx(py_obj); + ptr = _pytalloc_get_ptr(py_obj); + + if (mem_ctx != ptr || ptr == NULL) { + if (check_only) { + return NULL; + } + + PyErr_Format(PyExc_TypeError, "%s: expected %s, " + "but the pointer is no talloc pointer, " + "pytalloc_get_ptr() would get the raw pointer.", + function, type_name); + return NULL; + } + + type_obj = talloc_check_name(ptr, type_name); + if (type_obj == NULL) { + const char *name = NULL; + + if (check_only) { + return NULL; + } + + name = talloc_get_name(ptr); + PyErr_Format(PyExc_TypeError, "%s: expected %s, got %s", + function, type_name, name); + return NULL; + } + + return ptr; +} + +_PUBLIC_ int _pytalloc_check_type(PyObject *py_obj, const char *type_name) +{ + void *ptr = NULL; + + ptr = _pytalloc_get_checked_type(py_obj, type_name, + true, /* check_only */ + "pytalloc_check_type"); + if (ptr == NULL) { + return 0; + } + + return 1; +} + +_PUBLIC_ void *_pytalloc_get_type(PyObject *py_obj, const char *type_name) +{ + return _pytalloc_get_checked_type(py_obj, type_name, + false, /* not check_only */ + "pytalloc_get_type"); +} + +_PUBLIC_ void *_pytalloc_get_ptr(PyObject *py_obj) +{ + if (pytalloc_BaseObject_check(py_obj)) { + return ((pytalloc_BaseObject *)py_obj)->ptr; + } + if (pytalloc_Check(py_obj)) { + return ((pytalloc_Object *)py_obj)->ptr; + } + return NULL; +} + +_PUBLIC_ TALLOC_CTX *_pytalloc_get_mem_ctx(PyObject *py_obj) +{ + if (pytalloc_BaseObject_check(py_obj)) { + return ((pytalloc_BaseObject *)py_obj)->talloc_ptr_ctx; + } + if (pytalloc_Check(py_obj)) { + return ((pytalloc_Object *)py_obj)->talloc_ctx; + } + return NULL; +} + +_PUBLIC_ int pytalloc_BaseObject_PyType_Ready(PyTypeObject *type) +{ + PyTypeObject *talloc_type = pytalloc_GetBaseObjectType(); + if (talloc_type == NULL) { + return -1; + } + + type->tp_base = talloc_type; + type->tp_basicsize = pytalloc_BaseObject_size(); + + return PyType_Ready(type); +} + +_PUBLIC_ const char *_pytalloc_get_name(PyObject *obj) +{ + void *ptr = pytalloc_get_ptr(obj); + if (ptr == NULL) { + return "non-talloc object"; + } + return talloc_get_name(ptr); +} diff --git a/talloc.c b/talloc.c new file mode 100644 index 0000000..3a93a82 --- /dev/null +++ b/talloc.c @@ -0,0 +1,3070 @@ +/* + Samba Unix SMB/CIFS implementation. + + Samba trivial allocation library - new interface + + NOTE: Please read talloc_guide.txt for full documentation + + Copyright (C) Andrew Tridgell 2004 + Copyright (C) Stefan Metzmacher 2006 + + ** NOTE! The following LGPL license applies to the talloc + ** library. 
This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +/* + inspired by http://swapped.cc/halloc/ +*/ + +#include "replace.h" +#include "talloc.h" + +#ifdef HAVE_SYS_AUXV_H +#include <sys/auxv.h> +#endif + +#if (TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR) +#error "TALLOC_VERSION_MAJOR != TALLOC_BUILD_VERSION_MAJOR" +#endif + +#if (TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR) +#error "TALLOC_VERSION_MINOR != TALLOC_BUILD_VERSION_MINOR" +#endif + +/* Special macros that are no-ops except when run under Valgrind on + * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */ +#ifdef HAVE_VALGRIND_MEMCHECK_H + /* memcheck.h includes valgrind.h */ +#include <valgrind/memcheck.h> +#elif defined(HAVE_VALGRIND_H) +#include <valgrind.h> +#endif + +#define MAX_TALLOC_SIZE 0x10000000 + +#define TALLOC_FLAG_FREE 0x01 +#define TALLOC_FLAG_LOOP 0x02 +#define TALLOC_FLAG_POOL 0x04 /* This is a talloc pool */ +#define TALLOC_FLAG_POOLMEM 0x08 /* This is allocated in a pool */ + +/* + * Bits above this are random, used to make it harder to fake talloc + * headers during an attack. Try not to change this without good reason. + */ +#define TALLOC_FLAG_MASK 0x0F + +#define TALLOC_MAGIC_REFERENCE ((const char *)1) + +#define TALLOC_MAGIC_BASE 0xe814ec70 +#define TALLOC_MAGIC_NON_RANDOM ( \ + ~TALLOC_FLAG_MASK & ( \ + TALLOC_MAGIC_BASE + \ + (TALLOC_BUILD_VERSION_MAJOR << 24) + \ + (TALLOC_BUILD_VERSION_MINOR << 16) + \ + (TALLOC_BUILD_VERSION_RELEASE << 8))) +static unsigned int talloc_magic = TALLOC_MAGIC_NON_RANDOM; + +/* by default we abort when given a bad pointer (such as when talloc_free() is called + on a pointer that came from malloc() */ +#ifndef TALLOC_ABORT +#define TALLOC_ABORT(reason) abort() +#endif + +#ifndef discard_const_p +#if defined(__intptr_t_defined) || defined(HAVE_INTPTR_T) +# define discard_const_p(type, ptr) ((type *)((intptr_t)(ptr))) +#else +# define discard_const_p(type, ptr) ((type *)(ptr)) +#endif +#endif + +/* these macros gain us a few percent of speed on gcc */ +#if (__GNUC__ >= 3) +/* the strange !! 
is to ensure that __builtin_expect() takes either 0 or 1 + as its first argument */ +#ifndef likely +#define likely(x) __builtin_expect(!!(x), 1) +#endif +#ifndef unlikely +#define unlikely(x) __builtin_expect(!!(x), 0) +#endif +#else +#ifndef likely +#define likely(x) (x) +#endif +#ifndef unlikely +#define unlikely(x) (x) +#endif +#endif + +/* this null_context is only used if talloc_enable_leak_report() or + talloc_enable_leak_report_full() is called, otherwise it remains + NULL +*/ +static void *null_context; +static bool talloc_report_null; +static bool talloc_report_null_full; +static void *autofree_context; + +static void talloc_setup_atexit(void); + +/* used to enable fill of memory on free, which can be useful for + * catching use after free errors when valgrind is too slow + */ +static struct { + bool initialised; + bool enabled; + uint8_t fill_value; +} talloc_fill; + +#define TALLOC_FILL_ENV "TALLOC_FREE_FILL" + +/* + * do not wipe the header, to allow the + * double-free logic to still work + */ +#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \ + if (unlikely(talloc_fill.enabled)) { \ + size_t _flen = (_tc)->size; \ + char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ + memset(_fptr, talloc_fill.fill_value, _flen); \ + } \ +} while (0) + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) +/* Mark the whole chunk as not accessible */ +#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \ + size_t _flen = TC_HDR_SIZE + (_tc)->size; \ + char *_fptr = (char *)(_tc); \ + VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \ +} while(0) +#else +#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0) +#endif + +#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \ + TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \ + TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \ +} while (0) + +#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \ + if (unlikely(talloc_fill.enabled)) { \ + size_t _flen = (_tc)->size - (_new_size); \ + char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ + _fptr += (_new_size); \ + memset(_fptr, talloc_fill.fill_value, _flen); \ + } \ +} while (0) + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) +/* Mark the unused bytes not accessible */ +#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \ + size_t _flen = (_tc)->size - (_new_size); \ + char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ + _fptr += (_new_size); \ + VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \ +} while (0) +#else +#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) +#endif + +#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \ + TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \ + TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \ +} while (0) + +#define TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \ + if (unlikely(talloc_fill.enabled)) { \ + size_t _flen = (_tc)->size - (_new_size); \ + char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ + _fptr += (_new_size); \ + memset(_fptr, talloc_fill.fill_value, _flen); \ + } \ +} while (0) + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) +/* Mark the unused bytes as undefined */ +#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \ + size_t _flen = (_tc)->size - (_new_size); \ + char *_fptr = (char *)TC_PTR_FROM_CHUNK(_tc); \ + _fptr += (_new_size); \ + VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \ +} while (0) +#else +#define TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) +#endif + +#define TC_UNDEFINE_SHRINK_CHUNK(_tc, _new_size) do { \ + 
TC_UNDEFINE_SHRINK_FILL_CHUNK(_tc, _new_size); \ + TC_UNDEFINE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \ +} while (0) + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) +/* Mark the new bytes as undefined */ +#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \ + size_t _old_used = TC_HDR_SIZE + (_tc)->size; \ + size_t _new_used = TC_HDR_SIZE + (_new_size); \ + size_t _flen = _new_used - _old_used; \ + char *_fptr = _old_used + (char *)(_tc); \ + VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \ +} while (0) +#else +#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0) +#endif + +#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \ + TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \ +} while (0) + +struct talloc_reference_handle { + struct talloc_reference_handle *next, *prev; + void *ptr; + const char *location; +}; + +struct talloc_memlimit { + struct talloc_chunk *parent; + struct talloc_memlimit *upper; + size_t max_size; + size_t cur_size; +}; + +static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size); +static inline void talloc_memlimit_grow(struct talloc_memlimit *limit, + size_t size); +static inline void talloc_memlimit_shrink(struct talloc_memlimit *limit, + size_t size); +static inline void tc_memlimit_update_on_free(struct talloc_chunk *tc); + +static inline void _tc_set_name_const(struct talloc_chunk *tc, + const char *name); +static struct talloc_chunk *_vasprintf_tc(const void *t, + const char *fmt, + va_list ap); + +typedef int (*talloc_destructor_t)(void *); + +struct talloc_pool_hdr; + +struct talloc_chunk { + /* + * flags includes the talloc magic, which is randomised to + * make overwrite attacks harder + */ + unsigned flags; + + /* + * If you have a logical tree like: + * + * <parent> + * / | \ + * / | \ + * / | \ + * <child 1> <child 2> <child 3> + * + * The actual talloc tree is: + * + * <parent> + * | + * <child 1> - <child 2> - <child 3> + * + * The children are linked with next/prev pointers, and + * child 1 is linked to the parent with parent/child + * pointers. + */ + + struct talloc_chunk *next, *prev; + struct talloc_chunk *parent, *child; + struct talloc_reference_handle *refs; + talloc_destructor_t destructor; + const char *name; + size_t size; + + /* + * limit semantics: + * if 'limit' is set it means all *new* children of the context will + * be limited to a total aggregate size of max_size for memory + * allocations. + * cur_size is used to keep track of the current use + */ + struct talloc_memlimit *limit; + + /* + * For members of a pool (i.e. TALLOC_FLAG_POOLMEM is set), "pool" + * is a pointer to the struct talloc_chunk of the pool that it was + * allocated from. This way children can quickly find the pool to chew + * from.
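/*
 * What the next/prev/parent/child links above buy the caller: talloc_free()
 * on a context walks this tree and releases the whole subtree in one call.
 * A minimal usage sketch (struct foo is a stand-in type, not a name from
 * this file):
 *
 *	TALLOC_CTX *mem_ctx = talloc_new(NULL);
 *	struct foo *f = talloc(mem_ctx, struct foo);
 *	char *label = talloc_strdup(f, "owned by f");
 *
 *	talloc_free(mem_ctx);	(frees f, then label, via the child links)
 */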
+ */ + struct talloc_pool_hdr *pool; +}; + +union talloc_chunk_cast_u { + uint8_t *ptr; + struct talloc_chunk *chunk; +}; + +/* 16 byte alignment seems to keep everyone happy */ +#define TC_ALIGN16(s) (((s)+15)&~15) +#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk)) +#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc)) + +_PUBLIC_ int talloc_version_major(void) +{ + return TALLOC_VERSION_MAJOR; +} + +_PUBLIC_ int talloc_version_minor(void) +{ + return TALLOC_VERSION_MINOR; +} + +_PUBLIC_ int talloc_test_get_magic(void) +{ + return talloc_magic; +} + +static inline void _talloc_chunk_set_free(struct talloc_chunk *tc, + const char *location) +{ + /* + * Mark this memory as free, and also over-stamp the talloc + * magic with the old-style magic. + * + * Why? This tries to avoid a memory read use-after-free from + * disclosing our talloc magic, which would then allow an + * attacker to prepare a valid header and so run a destructor. + * + */ + tc->flags = TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE + | (tc->flags & TALLOC_FLAG_MASK); + + /* we mark the freed memory with where we called the free + * from. This means on a double free error we can report where + * the first free came from + */ + if (location) { + tc->name = location; + } +} + +static inline void _talloc_chunk_set_not_free(struct talloc_chunk *tc) +{ + /* + * Mark this memory as not free. + * + * Why? This is memory either in a pool (and so available for + * talloc's re-use or after the realloc(). We need to mark + * the memory as free() before any realloc() call as we can't + * write to the memory after that. + * + * We put back the normal magic instead of the 'not random' + * magic. + */ + + tc->flags = talloc_magic | + ((tc->flags & TALLOC_FLAG_MASK) & ~TALLOC_FLAG_FREE); +} + +static void (*talloc_log_fn)(const char *message); + +_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message)) +{ + talloc_log_fn = log_fn; +} + +#ifdef HAVE_CONSTRUCTOR_ATTRIBUTE +#define CONSTRUCTOR __attribute__((constructor)) +#elif defined(HAVE_PRAGMA_INIT) +#define CONSTRUCTOR +#pragma init (talloc_lib_init) +#endif +#if defined(HAVE_CONSTRUCTOR_ATTRIBUTE) || defined(HAVE_PRAGMA_INIT) +void talloc_lib_init(void) CONSTRUCTOR; +void talloc_lib_init(void) +{ + uint32_t random_value; +#if defined(HAVE_GETAUXVAL) && defined(AT_RANDOM) + uint8_t *p; + /* + * Use the kernel-provided random values used for + * ASLR. This won't change per-exec, which is ideal for us + */ + p = (uint8_t *) getauxval(AT_RANDOM); + if (p) { + /* + * We get 16 bytes from getauxval. By calling rand(), + * a totally insecure PRNG, but one that will + * deterministically have a different value when called + * twice, we ensure that if two talloc-like libraries + * are somehow loaded in the same address space, that + * because we choose different bytes, we will keep the + * protection against collision of multiple talloc + * libs. + * + * This protection is important because the effects of + * passing a talloc pointer from one to the other may + * be very hard to determine. 
+ */ + int offset = rand() % (16 - sizeof(random_value)); + memcpy(&random_value, p + offset, sizeof(random_value)); + } else +#endif + { + /* + * Otherwise, hope the location we are loaded in + * memory is randomised by someone else + */ + random_value = ((uintptr_t)talloc_lib_init & 0xFFFFFFFF); + } + talloc_magic = random_value & ~TALLOC_FLAG_MASK; +} +#else +#warning "No __attribute__((constructor)) support found on this platform, additional talloc security measures not available" +#endif + +static void talloc_lib_atexit(void) +{ + TALLOC_FREE(autofree_context); + + if (talloc_total_size(null_context) == 0) { + return; + } + + if (talloc_report_null_full) { + talloc_report_full(null_context, stderr); + } else if (talloc_report_null) { + talloc_report(null_context, stderr); + } +} + +static void talloc_setup_atexit(void) +{ + static bool done; + + if (done) { + return; + } + + atexit(talloc_lib_atexit); + done = true; +} + +static void talloc_log(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2); +static void talloc_log(const char *fmt, ...) +{ + va_list ap; + char *message; + + if (!talloc_log_fn) { + return; + } + + va_start(ap, fmt); + message = talloc_vasprintf(NULL, fmt, ap); + va_end(ap); + + talloc_log_fn(message); + talloc_free(message); +} + +static void talloc_log_stderr(const char *message) +{ + fprintf(stderr, "%s", message); +} + +_PUBLIC_ void talloc_set_log_stderr(void) +{ + talloc_set_log_fn(talloc_log_stderr); +} + +static void (*talloc_abort_fn)(const char *reason); + +_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason)) +{ + talloc_abort_fn = abort_fn; +} + +static void talloc_abort(const char *reason) +{ + talloc_log("%s\n", reason); + + if (!talloc_abort_fn) { + TALLOC_ABORT(reason); + } + + talloc_abort_fn(reason); +} + +static void talloc_abort_access_after_free(void) +{ + talloc_abort("Bad talloc magic value - access after free"); +} + +static void talloc_abort_unknown_value(void) +{ + talloc_abort("Bad talloc magic value - unknown value"); +} + +/* panic if we get a bad magic value */ +static inline struct talloc_chunk *talloc_chunk_from_ptr(const void *ptr) +{ + const char *pp = (const char *)ptr; + struct talloc_chunk *tc = discard_const_p(struct talloc_chunk, pp - TC_HDR_SIZE); + if (unlikely((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) != talloc_magic)) { + if ((tc->flags & (TALLOC_FLAG_FREE | ~TALLOC_FLAG_MASK)) + == (TALLOC_MAGIC_NON_RANDOM | TALLOC_FLAG_FREE)) { + talloc_log("talloc: access after free error - first free may be at %s\n", tc->name); + talloc_abort_access_after_free(); + return NULL; + } + + talloc_abort_unknown_value(); + return NULL; + } + return tc; +} + +/* hook into the front of the list */ +#define _TLIST_ADD(list, p) \ +do { \ + if (!(list)) { \ + (list) = (p); \ + (p)->next = (p)->prev = NULL; \ + } else { \ + (list)->prev = (p); \ + (p)->next = (list); \ + (p)->prev = NULL; \ + (list) = (p); \ + }\ +} while (0) + +/* remove an element from a list - element doesn't have to be in list. 
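/*
 * Layout note for the helpers above: the pointer handed to the user starts
 * exactly TC_HDR_SIZE bytes after its talloc_chunk header, so converting
 * in either direction is plain pointer arithmetic. A simplified sketch of
 * the same idea with a toy header type (illustrative only):
 *
 *	struct toy_hdr { size_t size; unsigned flags; };
 *	#define TOY_HDR_SIZE TC_ALIGN16(sizeof(struct toy_hdr))
 *
 *	void *toy_ptr_from_hdr(struct toy_hdr *h)
 *	{
 *		return (char *)h + TOY_HDR_SIZE;	(cf. TC_PTR_FROM_CHUNK)
 *	}
 *
 *	struct toy_hdr *toy_hdr_from_ptr(void *p)
 *	{
 *		return (struct toy_hdr *)((char *)p - TOY_HDR_SIZE);
 *	}
 */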
*/ +#define _TLIST_REMOVE(list, p) \ +do { \ + if ((p) == (list)) { \ + (list) = (p)->next; \ + if (list) (list)->prev = NULL; \ + } else { \ + if ((p)->prev) (p)->prev->next = (p)->next; \ + if ((p)->next) (p)->next->prev = (p)->prev; \ + } \ + if ((p) && ((p) != (list))) (p)->next = (p)->prev = NULL; \ +} while (0) + + +/* + return the parent chunk of a pointer +*/ +static inline struct talloc_chunk *talloc_parent_chunk(const void *ptr) +{ + struct talloc_chunk *tc; + + if (unlikely(ptr == NULL)) { + return NULL; + } + + tc = talloc_chunk_from_ptr(ptr); + while (tc->prev) tc=tc->prev; + + return tc->parent; +} + +_PUBLIC_ void *talloc_parent(const void *ptr) +{ + struct talloc_chunk *tc = talloc_parent_chunk(ptr); + return tc? TC_PTR_FROM_CHUNK(tc) : NULL; +} + +/* + find the parent's name +*/ +_PUBLIC_ const char *talloc_parent_name(const void *ptr) +{ + struct talloc_chunk *tc = talloc_parent_chunk(ptr); + return tc? tc->name : NULL; +} + +/* + A pool carries an in-pool object count in the first 16 bytes. + This is done to support talloc_steal() to a parent outside of the + pool. The count includes the pool itself, so a talloc_free() on a pool will + only destroy the pool if the count has dropped to zero. A talloc_free() of a + pool member will reduce the count, and eventually also call free(3) on the + pool memory. + + The object count is not put into "struct talloc_chunk" because it is only + relevant for talloc pools and the alignment to 16 bytes would increase the + memory footprint of each talloc chunk by those 16 bytes. +*/ + +struct talloc_pool_hdr { + void *end; + unsigned int object_count; + size_t poolsize; +}; + +union talloc_pool_hdr_cast_u { + uint8_t *ptr; + struct talloc_pool_hdr *hdr; +}; + +#define TP_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_pool_hdr)) + +static inline struct talloc_pool_hdr *talloc_pool_from_chunk(struct talloc_chunk *c) +{ + union talloc_chunk_cast_u tcc = { .chunk = c }; + union talloc_pool_hdr_cast_u tphc = { tcc.ptr - TP_HDR_SIZE }; + return tphc.hdr; +} + +static inline struct talloc_chunk *talloc_chunk_from_pool(struct talloc_pool_hdr *h) +{ + union talloc_pool_hdr_cast_u tphc = { .hdr = h }; + union talloc_chunk_cast_u tcc = { .ptr = tphc.ptr + TP_HDR_SIZE }; + return tcc.chunk; +} + +static inline void *tc_pool_end(struct talloc_pool_hdr *pool_hdr) +{ + struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr); + return (char *)tc + TC_HDR_SIZE + pool_hdr->poolsize; +} + +static inline size_t tc_pool_space_left(struct talloc_pool_hdr *pool_hdr) +{ + return (char *)tc_pool_end(pool_hdr) - (char *)pool_hdr->end; +} + +/* If tc is inside a pool, this gives the next neighbour.
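/*
 * Concretely, a pool allocation is laid out as
 * [struct talloc_pool_hdr][struct talloc_chunk][poolsize bytes], which is
 * why the two converters above simply add or subtract TP_HDR_SIZE. A
 * hedged sketch of how the object count then evolves (sizes arbitrary):
 *
 *	TALLOC_CTX *pool = talloc_pool(NULL, 4096);	(count == 1, the pool itself)
 *	void *a = talloc_size(pool, 64);		(count == 2)
 *	void *b = talloc_size(pool, 64);		(count == 3)
 *	talloc_free(a);		(count == 2; the space is reclaimed lazily, if at all)
 *	talloc_free(pool);	(frees b as a child, the count reaches 0, free(3) runs)
 */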
*/ +static inline void *tc_next_chunk(struct talloc_chunk *tc) +{ + return (char *)tc + TC_ALIGN16(TC_HDR_SIZE + tc->size); +} + +static inline void *tc_pool_first_chunk(struct talloc_pool_hdr *pool_hdr) +{ + struct talloc_chunk *tc = talloc_chunk_from_pool(pool_hdr); + return tc_next_chunk(tc); +} + +/* Mark the whole remaining pool as not accessible */ +static inline void tc_invalidate_pool(struct talloc_pool_hdr *pool_hdr) +{ + size_t flen = tc_pool_space_left(pool_hdr); + + if (unlikely(talloc_fill.enabled)) { + memset(pool_hdr->end, talloc_fill.fill_value, flen); + } + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS) + VALGRIND_MAKE_MEM_NOACCESS(pool_hdr->end, flen); +#endif +} + +/* + Allocate from a pool +*/ + +static inline struct talloc_chunk *tc_alloc_pool(struct talloc_chunk *parent, + size_t size, size_t prefix_len) +{ + struct talloc_pool_hdr *pool_hdr = NULL; + union talloc_chunk_cast_u tcc; + size_t space_left; + struct talloc_chunk *result; + size_t chunk_size; + + if (parent == NULL) { + return NULL; + } + + if (parent->flags & TALLOC_FLAG_POOL) { + pool_hdr = talloc_pool_from_chunk(parent); + } + else if (parent->flags & TALLOC_FLAG_POOLMEM) { + pool_hdr = parent->pool; + } + + if (pool_hdr == NULL) { + return NULL; + } + + space_left = tc_pool_space_left(pool_hdr); + + /* + * Align size to 16 bytes + */ + chunk_size = TC_ALIGN16(size + prefix_len); + + if (space_left < chunk_size) { + return NULL; + } + + tcc = (union talloc_chunk_cast_u) { + .ptr = ((uint8_t *)pool_hdr->end) + prefix_len + }; + result = tcc.chunk; + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) + VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, chunk_size); +#endif + + pool_hdr->end = (void *)((char *)pool_hdr->end + chunk_size); + + result->flags = talloc_magic | TALLOC_FLAG_POOLMEM; + result->pool = pool_hdr; + + pool_hdr->object_count++; + + return result; +} + +/* + Allocate a bit of memory as a child of an existing pointer +*/ +static inline void *__talloc_with_prefix(const void *context, + size_t size, + size_t prefix_len, + struct talloc_chunk **tc_ret) +{ + struct talloc_chunk *tc = NULL; + struct talloc_memlimit *limit = NULL; + size_t total_len = TC_HDR_SIZE + size + prefix_len; + struct talloc_chunk *parent = NULL; + + if (unlikely(context == NULL)) { + context = null_context; + } + + if (unlikely(size >= MAX_TALLOC_SIZE)) { + return NULL; + } + + if (unlikely(total_len < TC_HDR_SIZE)) { + return NULL; + } + + if (likely(context != NULL)) { + parent = talloc_chunk_from_ptr(context); + + if (parent->limit != NULL) { + limit = parent->limit; + } + + tc = tc_alloc_pool(parent, TC_HDR_SIZE+size, prefix_len); + } + + if (tc == NULL) { + uint8_t *ptr = NULL; + union talloc_chunk_cast_u tcc; + + /* + * Only do the memlimit check/update on actual allocation. 
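/*
 * Note on the fallback below: tc_alloc_pool() is the fast path, and when
 * it returns NULL the allocation falls through to plain malloc(). Callers
 * cannot tell the two cases apart; a sketch, with arbitrary sizes:
 *
 *	TALLOC_CTX *pool = talloc_pool(NULL, 256);
 *	void *small = talloc_size(pool, 64);	(carved out of the pool)
 *	void *big = talloc_size(pool, 4096);	(does not fit: malloc()ed, still a child)
 *	talloc_free(pool);			(frees both, whichever path they took)
 */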
+ */ + if (!talloc_memlimit_check(limit, total_len)) { + errno = ENOMEM; + return NULL; + } + + ptr = malloc(total_len); + if (unlikely(ptr == NULL)) { + return NULL; + } + tcc = (union talloc_chunk_cast_u) { .ptr = ptr + prefix_len }; + tc = tcc.chunk; + tc->flags = talloc_magic; + tc->pool = NULL; + + talloc_memlimit_grow(limit, total_len); + } + + tc->limit = limit; + tc->size = size; + tc->destructor = NULL; + tc->child = NULL; + tc->name = NULL; + tc->refs = NULL; + + if (likely(context != NULL)) { + if (parent->child) { + parent->child->parent = NULL; + tc->next = parent->child; + tc->next->prev = tc; + } else { + tc->next = NULL; + } + tc->parent = parent; + tc->prev = NULL; + parent->child = tc; + } else { + tc->next = tc->prev = tc->parent = NULL; + } + + *tc_ret = tc; + return TC_PTR_FROM_CHUNK(tc); +} + +static inline void *__talloc(const void *context, + size_t size, + struct talloc_chunk **tc) +{ + return __talloc_with_prefix(context, size, 0, tc); +} + +/* + * Create a talloc pool + */ + +static inline void *_talloc_pool(const void *context, size_t size) +{ + struct talloc_chunk *tc = NULL; + struct talloc_pool_hdr *pool_hdr; + void *result; + + result = __talloc_with_prefix(context, size, TP_HDR_SIZE, &tc); + + if (unlikely(result == NULL)) { + return NULL; + } + + pool_hdr = talloc_pool_from_chunk(tc); + + tc->flags |= TALLOC_FLAG_POOL; + tc->size = 0; + + pool_hdr->object_count = 1; + pool_hdr->end = result; + pool_hdr->poolsize = size; + + tc_invalidate_pool(pool_hdr); + + return result; +} + +_PUBLIC_ void *talloc_pool(const void *context, size_t size) +{ + return _talloc_pool(context, size); +} + +/* + * Create a talloc pool correctly sized for a basic size plus + * a number of subobjects whose total size is given. Essentially + * a custom allocator for talloc to reduce fragmentation. + */ + +_PUBLIC_ void *_talloc_pooled_object(const void *ctx, + size_t type_size, + const char *type_name, + unsigned num_subobjects, + size_t total_subobjects_size) +{ + size_t poolsize, subobjects_slack, tmp; + struct talloc_chunk *tc; + struct talloc_pool_hdr *pool_hdr; + void *ret; + + poolsize = type_size + total_subobjects_size; + + if ((poolsize < type_size) || (poolsize < total_subobjects_size)) { + goto overflow; + } + + if (num_subobjects == UINT_MAX) { + goto overflow; + } + num_subobjects += 1; /* the object body itself */ + + /* + * Alignment can increase the pool size by at most 15 bytes per object + * plus alignment for the object itself + */ + subobjects_slack = (TC_HDR_SIZE + TP_HDR_SIZE + 15) * num_subobjects; + if (subobjects_slack < num_subobjects) { + goto overflow; + } + + tmp = poolsize + subobjects_slack; + if ((tmp < poolsize) || (tmp < subobjects_slack)) { + goto overflow; + } + poolsize = tmp; + + ret = _talloc_pool(ctx, poolsize); + if (ret == NULL) { + return NULL; + } + + tc = talloc_chunk_from_ptr(ret); + tc->size = type_size; + + pool_hdr = talloc_pool_from_chunk(tc); + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) + VALGRIND_MAKE_MEM_UNDEFINED(pool_hdr->end, type_size); +#endif + + pool_hdr->end = ((char *)pool_hdr->end + TC_ALIGN16(type_size)); + + _tc_set_name_const(tc, type_name); + return ret; + +overflow: + return NULL; +} + +/* + setup a destructor to be called on free of a pointer + the destructor should return 0 on success, or -1 on failure. 
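/*
 * For example (a hedged sketch; struct file_handle and mem_ctx are assumed
 * names, not part of this file):
 *
 *	static int file_handle_destructor(struct file_handle *h)
 *	{
 *		if (h->fd != -1) {
 *			close(h->fd);
 *		}
 *		return 0;	(allow the free to proceed)
 *	}
 *
 *	struct file_handle *h = talloc(mem_ctx, struct file_handle);
 *	h->fd = open("/some/file", O_RDONLY);
 *	talloc_set_destructor(h, file_handle_destructor);
 *	(talloc_free(h), or of any ancestor, now closes the fd first)
 */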
+ if the destructor fails then the free fails, and the memory can + continue to be used +*/ +_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*destructor)(void *)) +{ + struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); + tc->destructor = destructor; +} + +/* + increase the reference count on a piece of memory. +*/ +_PUBLIC_ int talloc_increase_ref_count(const void *ptr) +{ + if (unlikely(!talloc_reference(null_context, ptr))) { + return -1; + } + return 0; +} + +/* + helper for talloc_reference() + + this is referenced by a function pointer and should not be inline +*/ +static int talloc_reference_destructor(struct talloc_reference_handle *handle) +{ + struct talloc_chunk *ptr_tc = talloc_chunk_from_ptr(handle->ptr); + _TLIST_REMOVE(ptr_tc->refs, handle); + return 0; +} + +/* + more efficient way to add a name to a pointer - the name must point to a + true string constant +*/ +static inline void _tc_set_name_const(struct talloc_chunk *tc, + const char *name) +{ + tc->name = name; +} + +/* + internal talloc_named_const() +*/ +static inline void *_talloc_named_const(const void *context, size_t size, const char *name) +{ + void *ptr; + struct talloc_chunk *tc = NULL; + + ptr = __talloc(context, size, &tc); + if (unlikely(ptr == NULL)) { + return NULL; + } + + _tc_set_name_const(tc, name); + + return ptr; +} + +/* + make a secondary reference to a pointer, hanging off the given context. + the pointer remains valid until both the original caller and this given + context are freed. + + the major use for this is when two different structures need to reference the + same underlying data, and you want to be able to free the two instances separately, + and in either order +*/ +_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location) +{ + struct talloc_chunk *tc; + struct talloc_reference_handle *handle; + if (unlikely(ptr == NULL)) return NULL; + + tc = talloc_chunk_from_ptr(ptr); + handle = (struct talloc_reference_handle *)_talloc_named_const(context, + sizeof(struct talloc_reference_handle), + TALLOC_MAGIC_REFERENCE); + if (unlikely(handle == NULL)) return NULL; + + /* note that we hang the destructor off the handle, not the + main context as that allows the caller to still setup their + own destructor on the context if they want to */ + talloc_set_destructor(handle, talloc_reference_destructor); + handle->ptr = discard_const_p(void, ptr); + handle->location = location; + _TLIST_ADD(tc->refs, handle); + return handle->ptr; +} + +static void *_talloc_steal_internal(const void *new_ctx, const void *ptr); + +static inline void _tc_free_poolmem(struct talloc_chunk *tc, + const char *location) +{ + struct talloc_pool_hdr *pool; + struct talloc_chunk *pool_tc; + void *next_tc; + + pool = tc->pool; + pool_tc = talloc_chunk_from_pool(pool); + next_tc = tc_next_chunk(tc); + + _talloc_chunk_set_free(tc, location); + + TC_INVALIDATE_FULL_CHUNK(tc); + + if (unlikely(pool->object_count == 0)) { + talloc_abort("Pool object count zero!"); + return; + } + + pool->object_count--; + + if (unlikely(pool->object_count == 1 + && !(pool_tc->flags & TALLOC_FLAG_FREE))) { + /* + * if there is just one object left in the pool + * and pool->flags does not have TALLOC_FLAG_FREE, + * it means this is the pool itself and + * the rest is available for new objects + * again.
+ */ + pool->end = tc_pool_first_chunk(pool); + tc_invalidate_pool(pool); + return; + } + + if (unlikely(pool->object_count == 0)) { + /* + * we mark the freed memory with where we called the free + * from. This means on a double free error we can report where + * the first free came from + */ + pool_tc->name = location; + + if (pool_tc->flags & TALLOC_FLAG_POOLMEM) { + _tc_free_poolmem(pool_tc, location); + } else { + /* + * The tc_memlimit_update_on_free() + * call takes into account the + * prefix TP_HDR_SIZE allocated before + * the pool talloc_chunk. + */ + tc_memlimit_update_on_free(pool_tc); + TC_INVALIDATE_FULL_CHUNK(pool_tc); + free(pool); + } + return; + } + + if (pool->end == next_tc) { + /* + * if pool->pool still points to end of + * 'tc' (which is stored in the 'next_tc' variable), + * we can reclaim the memory of 'tc'. + */ + pool->end = tc; + return; + } + + /* + * Do nothing. The memory is just "wasted", waiting for the pool + * itself to be freed. + */ +} + +static inline void _tc_free_children_internal(struct talloc_chunk *tc, + void *ptr, + const char *location); + +static inline int _talloc_free_internal(void *ptr, const char *location); + +/* + internal free call that takes a struct talloc_chunk *. +*/ +static inline int _tc_free_internal(struct talloc_chunk *tc, + const char *location) +{ + void *ptr_to_free; + void *ptr = TC_PTR_FROM_CHUNK(tc); + + if (unlikely(tc->refs)) { + int is_child; + /* check if this is a reference from a child or + * grandchild back to it's parent or grandparent + * + * in that case we need to remove the reference and + * call another instance of talloc_free() on the current + * pointer. + */ + is_child = talloc_is_parent(tc->refs, ptr); + _talloc_free_internal(tc->refs, location); + if (is_child) { + return _talloc_free_internal(ptr, location); + } + return -1; + } + + if (unlikely(tc->flags & TALLOC_FLAG_LOOP)) { + /* we have a free loop - stop looping */ + return 0; + } + + if (unlikely(tc->destructor)) { + talloc_destructor_t d = tc->destructor; + + /* + * Protect the destructor against some overwrite + * attacks, by explicitly checking it has the right + * magic here. + */ + if (talloc_chunk_from_ptr(ptr) != tc) { + /* + * This can't actually happen, the + * call itself will panic. + */ + TALLOC_ABORT("talloc_chunk_from_ptr failed!"); + } + + if (d == (talloc_destructor_t)-1) { + return -1; + } + tc->destructor = (talloc_destructor_t)-1; + if (d(ptr) == -1) { + /* + * Only replace the destructor pointer if + * calling the destructor didn't modify it. + */ + if (tc->destructor == (talloc_destructor_t)-1) { + tc->destructor = d; + } + return -1; + } + tc->destructor = NULL; + } + + if (tc->parent) { + _TLIST_REMOVE(tc->parent->child, tc); + if (tc->parent->child) { + tc->parent->child->parent = tc->parent; + } + } else { + if (tc->prev) tc->prev->next = tc->next; + if (tc->next) tc->next->prev = tc->prev; + tc->prev = tc->next = NULL; + } + + tc->flags |= TALLOC_FLAG_LOOP; + + _tc_free_children_internal(tc, ptr, location); + + _talloc_chunk_set_free(tc, location); + + if (tc->flags & TALLOC_FLAG_POOL) { + struct talloc_pool_hdr *pool; + + pool = talloc_pool_from_chunk(tc); + + if (unlikely(pool->object_count == 0)) { + talloc_abort("Pool object count zero!"); + return 0; + } + + pool->object_count--; + + if (likely(pool->object_count != 0)) { + return 0; + } + + /* + * With object_count==0, a pool becomes a normal piece of + * memory to free. 
If it's allocated inside a pool, it needs + * to be freed as poolmem, else it needs to be just freed. + */ + ptr_to_free = pool; + } else { + ptr_to_free = tc; + } + + if (tc->flags & TALLOC_FLAG_POOLMEM) { + _tc_free_poolmem(tc, location); + return 0; + } + + tc_memlimit_update_on_free(tc); + + TC_INVALIDATE_FULL_CHUNK(tc); + free(ptr_to_free); + return 0; +} + +/* + internal talloc_free call +*/ +static inline int _talloc_free_internal(void *ptr, const char *location) +{ + struct talloc_chunk *tc; + + if (unlikely(ptr == NULL)) { + return -1; + } + + /* possibly initialised the talloc fill value */ + if (unlikely(!talloc_fill.initialised)) { + const char *fill = getenv(TALLOC_FILL_ENV); + if (fill != NULL) { + talloc_fill.enabled = true; + talloc_fill.fill_value = strtoul(fill, NULL, 0); + } + talloc_fill.initialised = true; + } + + tc = talloc_chunk_from_ptr(ptr); + return _tc_free_internal(tc, location); +} + +static inline size_t _talloc_total_limit_size(const void *ptr, + struct talloc_memlimit *old_limit, + struct talloc_memlimit *new_limit); + +/* + move a lump of memory from one talloc context to another returning the + ptr on success, or NULL if it could not be transferred. + passing NULL as ptr will always return NULL with no side effects. +*/ +static void *_talloc_steal_internal(const void *new_ctx, const void *ptr) +{ + struct talloc_chunk *tc, *new_tc; + size_t ctx_size = 0; + + if (unlikely(!ptr)) { + return NULL; + } + + if (unlikely(new_ctx == NULL)) { + new_ctx = null_context; + } + + tc = talloc_chunk_from_ptr(ptr); + + if (tc->limit != NULL) { + + ctx_size = _talloc_total_limit_size(ptr, NULL, NULL); + + /* Decrement the memory limit from the source .. */ + talloc_memlimit_shrink(tc->limit->upper, ctx_size); + + if (tc->limit->parent == tc) { + tc->limit->upper = NULL; + } else { + tc->limit = NULL; + } + } + + if (unlikely(new_ctx == NULL)) { + if (tc->parent) { + _TLIST_REMOVE(tc->parent->child, tc); + if (tc->parent->child) { + tc->parent->child->parent = tc->parent; + } + } else { + if (tc->prev) tc->prev->next = tc->next; + if (tc->next) tc->next->prev = tc->prev; + } + + tc->parent = tc->next = tc->prev = NULL; + return discard_const_p(void, ptr); + } + + new_tc = talloc_chunk_from_ptr(new_ctx); + + if (unlikely(tc == new_tc || tc->parent == new_tc)) { + return discard_const_p(void, ptr); + } + + if (tc->parent) { + _TLIST_REMOVE(tc->parent->child, tc); + if (tc->parent->child) { + tc->parent->child->parent = tc->parent; + } + } else { + if (tc->prev) tc->prev->next = tc->next; + if (tc->next) tc->next->prev = tc->prev; + tc->prev = tc->next = NULL; + } + + tc->parent = new_tc; + if (new_tc->child) new_tc->child->parent = NULL; + _TLIST_ADD(new_tc->child, tc); + + if (tc->limit || new_tc->limit) { + ctx_size = _talloc_total_limit_size(ptr, tc->limit, + new_tc->limit); + /* .. and increment it in the destination. */ + if (new_tc->limit) { + talloc_memlimit_grow(new_tc->limit, ctx_size); + } + } + + return discard_const_p(void, ptr); +} + +/* + move a lump of memory from one talloc context to another returning the + ptr on success, or NULL if it could not be transferred. + passing NULL as ptr will always return NULL with no side effects. 
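/*
 * The canonical stealing pattern (sketch, with hypothetical names): build
 * the result on a throw-away context and reparent it only on success:
 *
 *	char *make_greeting(TALLOC_CTX *mem_ctx, const char *who)
 *	{
 *		TALLOC_CTX *tmp = talloc_new(NULL);
 *		char *s = talloc_asprintf(tmp, "hello %s", who);
 *		if (s != NULL) {
 *			talloc_steal(mem_ctx, s);	(s now outlives tmp)
 *		}
 *		talloc_free(tmp);	(drops any remaining temporaries)
 *		return s;
 *	}
 */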
+*/ +_PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location) +{ + struct talloc_chunk *tc; + + if (unlikely(ptr == NULL)) { + return NULL; + } + + tc = talloc_chunk_from_ptr(ptr); + + if (unlikely(tc->refs != NULL) && talloc_parent(ptr) != new_ctx) { + struct talloc_reference_handle *h; + + talloc_log("WARNING: talloc_steal with references at %s\n", + location); + + for (h=tc->refs; h; h=h->next) { + talloc_log("\treference at %s\n", + h->location); + } + } + +#if 0 + /* this test is probably too expensive to have on in the + normal build, but it is useful for debugging */ + if (talloc_is_parent(new_ctx, ptr)) { + talloc_log("WARNING: stealing into talloc child at %s\n", location); + } +#endif + + return _talloc_steal_internal(new_ctx, ptr); +} + +/* + this is like a talloc_steal(), but you must supply the old + parent. This resolves the ambiguity in a talloc_steal() which is + called on a context that has more than one parent (via references) + + The old parent can be either a reference or a parent +*/ +_PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr) +{ + struct talloc_chunk *tc; + struct talloc_reference_handle *h; + + if (unlikely(ptr == NULL)) { + return NULL; + } + + if (old_parent == talloc_parent(ptr)) { + return _talloc_steal_internal(new_parent, ptr); + } + + tc = talloc_chunk_from_ptr(ptr); + for (h=tc->refs;h;h=h->next) { + if (talloc_parent(h) == old_parent) { + if (_talloc_steal_internal(new_parent, h) != h) { + return NULL; + } + return discard_const_p(void, ptr); + } + } + + /* it wasn't a parent */ + return NULL; +} + +/* + remove a secondary reference to a pointer. This undoes what + talloc_reference() has done. The context and pointer arguments + must match those given to a talloc_reference() +*/ +static inline int talloc_unreference(const void *context, const void *ptr) +{ + struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); + struct talloc_reference_handle *h; + + if (unlikely(context == NULL)) { + context = null_context; + } + + for (h=tc->refs;h;h=h->next) { + struct talloc_chunk *p = talloc_parent_chunk(h); + if (p == NULL) { + if (context == NULL) break; + } else if (TC_PTR_FROM_CHUNK(p) == context) { + break; + } + } + if (h == NULL) { + return -1; + } + + return _talloc_free_internal(h, __location__); +} + +/* + remove a specific parent context from a pointer.
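/*
 * A sketch of the two-owner situation that talloc_unlink() resolves
 * (owner1 and owner2 are hypothetical contexts):
 *
 *	char *buf = talloc_strdup(owner1, "shared");
 *	talloc_reference(owner2, buf);	(second parent via a reference handle)
 *
 *	talloc_unlink(owner1, buf);	(buf lives on, reparented under owner2)
 *	talloc_unlink(owner2, buf);	(last parent gone: buf is really freed)
 */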
This is a more + controlled variant of talloc_free() +*/ + +/* coverity[ -tainted_data_sink : arg-1 ] */ +_PUBLIC_ int talloc_unlink(const void *context, void *ptr) +{ + struct talloc_chunk *tc_p, *new_p, *tc_c; + void *new_parent; + + if (ptr == NULL) { + return -1; + } + + if (context == NULL) { + context = null_context; + } + + if (talloc_unreference(context, ptr) == 0) { + return 0; + } + + if (context != NULL) { + tc_c = talloc_chunk_from_ptr(context); + } else { + tc_c = NULL; + } + if (tc_c != talloc_parent_chunk(ptr)) { + return -1; + } + + tc_p = talloc_chunk_from_ptr(ptr); + + if (tc_p->refs == NULL) { + return _talloc_free_internal(ptr, __location__); + } + + new_p = talloc_parent_chunk(tc_p->refs); + if (new_p) { + new_parent = TC_PTR_FROM_CHUNK(new_p); + } else { + new_parent = NULL; + } + + if (talloc_unreference(new_parent, ptr) != 0) { + return -1; + } + + _talloc_steal_internal(new_parent, ptr); + + return 0; +} + +/* + add a name to an existing pointer - va_list version +*/ +static inline const char *tc_set_name_v(struct talloc_chunk *tc, + const char *fmt, + va_list ap) PRINTF_ATTRIBUTE(2,0); + +static inline const char *tc_set_name_v(struct talloc_chunk *tc, + const char *fmt, + va_list ap) +{ + struct talloc_chunk *name_tc = _vasprintf_tc(TC_PTR_FROM_CHUNK(tc), + fmt, + ap); + if (likely(name_tc)) { + tc->name = TC_PTR_FROM_CHUNK(name_tc); + _tc_set_name_const(name_tc, ".name"); + } else { + tc->name = NULL; + } + return tc->name; +} + +/* + add a name to an existing pointer +*/ +_PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...) +{ + struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); + const char *name; + va_list ap; + va_start(ap, fmt); + name = tc_set_name_v(tc, fmt, ap); + va_end(ap); + return name; +} + + +/* + create a named talloc pointer. Any talloc pointer can be named, and + talloc_named() operates just like talloc() except that it allows you + to name the pointer. +*/ +_PUBLIC_ void *talloc_named(const void *context, size_t size, const char *fmt, ...) +{ + va_list ap; + void *ptr; + const char *name; + struct talloc_chunk *tc = NULL; + + ptr = __talloc(context, size, &tc); + if (unlikely(ptr == NULL)) return NULL; + + va_start(ap, fmt); + name = tc_set_name_v(tc, fmt, ap); + va_end(ap); + + if (unlikely(name == NULL)) { + _talloc_free_internal(ptr, __location__); + return NULL; + } + + return ptr; +} + +/* + return the name of a talloc ptr, or "UNNAMED" +*/ +static inline const char *__talloc_get_name(const void *ptr) +{ + struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); + if (unlikely(tc->name == TALLOC_MAGIC_REFERENCE)) { + return ".reference"; + } + if (likely(tc->name)) { + return tc->name; + } + return "UNNAMED"; +} + +_PUBLIC_ const char *talloc_get_name(const void *ptr) +{ + return __talloc_get_name(ptr); +} + +/* + check if a pointer has the given name. 
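/*
 * Chunk names double as runtime type tags: talloc(ctx, type) records the
 * string "type" as the name, and the checked getters compare against it.
 * Sketch (struct request and mem_ctx are assumed names):
 *
 *	void *opaque = talloc_zero(mem_ctx, struct request);
 *	struct request *req = talloc_get_type_abort(opaque, struct request);
 *	(a mismatching type would log both names and abort via
 *	 talloc_abort_type_mismatch() below)
 */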
If it does, return the pointer, + otherwise return NULL +*/ +_PUBLIC_ void *talloc_check_name(const void *ptr, const char *name) +{ + const char *pname; + if (unlikely(ptr == NULL)) return NULL; + pname = __talloc_get_name(ptr); + if (likely(pname == name || strcmp(pname, name) == 0)) { + return discard_const_p(void, ptr); + } + return NULL; +} + +static void talloc_abort_type_mismatch(const char *location, + const char *name, + const char *expected) +{ + const char *reason; + + reason = talloc_asprintf(NULL, + "%s: Type mismatch: name[%s] expected[%s]", + location, + name?name:"NULL", + expected); + if (!reason) { + reason = "Type mismatch"; + } + + talloc_abort(reason); +} + +_PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location) +{ + const char *pname; + + if (unlikely(ptr == NULL)) { + talloc_abort_type_mismatch(location, NULL, name); + return NULL; + } + + pname = __talloc_get_name(ptr); + if (likely(pname == name || strcmp(pname, name) == 0)) { + return discard_const_p(void, ptr); + } + + talloc_abort_type_mismatch(location, pname, name); + return NULL; +} + +/* + this is for compatibility with older versions of talloc +*/ +_PUBLIC_ void *talloc_init(const char *fmt, ...) +{ + va_list ap; + void *ptr; + const char *name; + struct talloc_chunk *tc = NULL; + + ptr = __talloc(NULL, 0, &tc); + if (unlikely(ptr == NULL)) return NULL; + + va_start(ap, fmt); + name = tc_set_name_v(tc, fmt, ap); + va_end(ap); + + if (unlikely(name == NULL)) { + _talloc_free_internal(ptr, __location__); + return NULL; + } + + return ptr; +} + +static inline void _tc_free_children_internal(struct talloc_chunk *tc, + void *ptr, + const char *location) +{ + while (tc->child) { + /* we need to work out who will own an abandoned child + if it cannot be freed. In priority order, the first + choice is owner of any remaining reference to this + pointer, the second choice is our parent, and the + final choice is the null context. */ + void *child = TC_PTR_FROM_CHUNK(tc->child); + const void *new_parent = null_context; + if (unlikely(tc->child->refs)) { + struct talloc_chunk *p = talloc_parent_chunk(tc->child->refs); + if (p) new_parent = TC_PTR_FROM_CHUNK(p); + } + if (unlikely(_tc_free_internal(tc->child, location) == -1)) { + if (talloc_parent_chunk(child) != tc) { + /* + * Destructor already reparented this child. + * No further reparenting needed. + */ + continue; + } + if (new_parent == null_context) { + struct talloc_chunk *p = talloc_parent_chunk(ptr); + if (p) new_parent = TC_PTR_FROM_CHUNK(p); + } + _talloc_steal_internal(new_parent, child); + } + } +} + +/* + this is a replacement for the Samba3 talloc_destroy_pool functionality. It + should probably not be used in new code. It's in here to keep the talloc + code consistent across Samba 3 and 4. +*/ +_PUBLIC_ void talloc_free_children(void *ptr) +{ + struct talloc_chunk *tc_name = NULL; + struct talloc_chunk *tc; + + if (unlikely(ptr == NULL)) { + return; + } + + tc = talloc_chunk_from_ptr(ptr); + + /* we do not want to free the context name if it is a child .. */ + if (likely(tc->child)) { + for (tc_name = tc->child; tc_name; tc_name = tc_name->next) { + if (tc->name == TC_PTR_FROM_CHUNK(tc_name)) break; + } + if (tc_name) { + _TLIST_REMOVE(tc->child, tc_name); + if (tc->child) { + tc->child->parent = tc; + } + } + } + + _tc_free_children_internal(tc, ptr, __location__); + + /* .. 
so we put it back after all other children have been freed */ + if (tc_name) { + if (tc->child) { + tc->child->parent = NULL; + } + tc_name->parent = tc; + _TLIST_ADD(tc->child, tc_name); + } +} + +/* + Allocate a bit of memory as a child of an existing pointer +*/ +_PUBLIC_ void *_talloc(const void *context, size_t size) +{ + struct talloc_chunk *tc; + return __talloc(context, size, &tc); +} + +/* + externally callable talloc_set_name_const() +*/ +_PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name) +{ + _tc_set_name_const(talloc_chunk_from_ptr(ptr), name); +} + +/* + create a named talloc pointer. Any talloc pointer can be named, and + talloc_named() operates just like talloc() except that it allows you + to name the pointer. +*/ +_PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name) +{ + return _talloc_named_const(context, size, name); +} + +/* + free a talloc pointer. This also frees all child pointers of this + pointer recursively + + return 0 if the memory is actually freed, otherwise -1. The memory + will not be freed if the ref_count is > 1 or the destructor (if + any) returns non-zero +*/ +_PUBLIC_ int _talloc_free(void *ptr, const char *location) +{ + struct talloc_chunk *tc; + + if (unlikely(ptr == NULL)) { + return -1; + } + + tc = talloc_chunk_from_ptr(ptr); + + if (unlikely(tc->refs != NULL)) { + struct talloc_reference_handle *h; + + if (talloc_parent(ptr) == null_context && tc->refs->next == NULL) { + /* in this case we do know which parent should + get this pointer, as there is really only + one parent */ + return talloc_unlink(null_context, ptr); + } + + talloc_log("ERROR: talloc_free with references at %s\n", + location); + + for (h=tc->refs; h; h=h->next) { + talloc_log("\treference at %s\n", + h->location); + } + return -1; + } + + return _talloc_free_internal(ptr, location); +} + + + +/* + A talloc version of realloc. The context argument is only used if + ptr is NULL +*/ +_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name) +{ + struct talloc_chunk *tc; + void *new_ptr; + bool malloced = false; + struct talloc_pool_hdr *pool_hdr = NULL; + size_t old_size = 0; + size_t new_size = 0; + + /* size zero is equivalent to free() */ + if (unlikely(size == 0)) { + talloc_unlink(context, ptr); + return NULL; + } + + if (unlikely(size >= MAX_TALLOC_SIZE)) { + return NULL; + } + + /* realloc(NULL) is equivalent to malloc() */ + if (ptr == NULL) { + return _talloc_named_const(context, size, name); + } + + tc = talloc_chunk_from_ptr(ptr); + + /* don't allow realloc on referenced pointers */ + if (unlikely(tc->refs)) { + return NULL; + } + + /* don't let anybody try to realloc a talloc_pool */ + if (unlikely(tc->flags & TALLOC_FLAG_POOL)) { + return NULL; + } + + /* handle realloc inside a talloc_pool */ + if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) { + pool_hdr = tc->pool; + } + + /* don't shrink if we have less than 1k to gain */ + if (size < tc->size && tc->limit == NULL) { + if (pool_hdr) { + void *next_tc = tc_next_chunk(tc); + TC_INVALIDATE_SHRINK_CHUNK(tc, size); + tc->size = size; + if (next_tc == pool_hdr->end) { + /* note: tc->size has changed, so this works */ + pool_hdr->end = tc_next_chunk(tc); + } + return ptr; + } else if ((tc->size - size) < 1024) { + /* + * if we call TC_INVALIDATE_SHRINK_CHUNK() here + * we would need to call TC_UNDEFINE_GROW_CHUNK() + * after each realloc call, which slows down + * testing a lot :-(. 
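/*
 * The caller-visible effect of the shrink heuristic above (sketch, with an
 * assumed mem_ctx):
 *
 *	int *v = talloc_array(mem_ctx, int, 100);
 *	v = talloc_realloc(mem_ctx, v, int, 200);	(may move; links are fixed up)
 *	v = talloc_realloc(mem_ctx, v, int, 190);	(saves under 1k: kept in place)
 *	v = talloc_realloc(mem_ctx, v, int, 0);		(size 0 acts as talloc_free())
 */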
+ * + * That is why we only mark memory as undefined here. + */ + TC_UNDEFINE_SHRINK_CHUNK(tc, size); + + /* do not shrink if we have less than 1k to gain */ + tc->size = size; + return ptr; + } + } else if (tc->size == size) { + /* + * do not change the pointer if it is exactly + * the same size. + */ + return ptr; + } + + /* + * by resetting magic we catch users of the old memory + * + * We mark this memory as free, and also over-stamp the talloc + * magic with the old-style magic. + * + * Why? This tries to avoid a memory read use-after-free from + * disclosing our talloc magic, which would then allow an + * attacker to prepare a valid header and so run a destructor. + * + * What else? We have to re-stamp back a valid normal magic + * on this memory once realloc() is done, as it will have done + * a memcpy() into the new valid memory. We can't do this in + * reverse as that would be a real use-after-free. + */ + _talloc_chunk_set_free(tc, NULL); + + if (pool_hdr) { + struct talloc_chunk *pool_tc; + void *next_tc = tc_next_chunk(tc); + size_t old_chunk_size = TC_ALIGN16(TC_HDR_SIZE + tc->size); + size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size); + size_t space_needed; + size_t space_left; + unsigned int chunk_count = pool_hdr->object_count; + + pool_tc = talloc_chunk_from_pool(pool_hdr); + if (!(pool_tc->flags & TALLOC_FLAG_FREE)) { + chunk_count -= 1; + } + + if (chunk_count == 1) { + /* + * optimize for the case where 'tc' is the only + * chunk in the pool. + */ + char *start = tc_pool_first_chunk(pool_hdr); + space_needed = new_chunk_size; + space_left = (char *)tc_pool_end(pool_hdr) - start; + + if (space_left >= space_needed) { + size_t old_used = TC_HDR_SIZE + tc->size; + size_t new_used = TC_HDR_SIZE + size; + new_ptr = start; + +#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED) + { + /* + * The area from + * start -> tc may have + * been freed and thus been marked as + * VALGRIND_MEM_NOACCESS. Set it to + * VALGRIND_MEM_UNDEFINED so we can + * copy into it without valgrind errors. + * We can't just mark + * new_ptr -> new_ptr + old_used + * as this may overlap on top of tc, + * (which is why we use memmove, not + * memcpy below) hence the MIN. + */ + size_t undef_len = MIN((((char *)tc) - ((char *)new_ptr)),old_used); + VALGRIND_MAKE_MEM_UNDEFINED(new_ptr, undef_len); + } +#endif + + memmove(new_ptr, tc, old_used); + + tc = (struct talloc_chunk *)new_ptr; + TC_UNDEFINE_GROW_CHUNK(tc, size); + + /* + * first we do not align the pool pointer + * because we want to invalidate the padding + * too. + */ + pool_hdr->end = new_used + (char *)new_ptr; + tc_invalidate_pool(pool_hdr); + + /* now the aligned pointer */ + pool_hdr->end = new_chunk_size + (char *)new_ptr; + goto got_new_ptr; + } + + next_tc = NULL; + } + + if (new_chunk_size == old_chunk_size) { + TC_UNDEFINE_GROW_CHUNK(tc, size); + _talloc_chunk_set_not_free(tc); + tc->size = size; + return ptr; + } + + if (next_tc == pool_hdr->end) { + /* + * optimize for the case where 'tc' is the last + * chunk in the pool. + */ + space_needed = new_chunk_size - old_chunk_size; + space_left = tc_pool_space_left(pool_hdr); + + if (space_left >= space_needed) { + TC_UNDEFINE_GROW_CHUNK(tc, size); + _talloc_chunk_set_not_free(tc); + tc->size = size; + pool_hdr->end = tc_next_chunk(tc); + return ptr; + } + } + + new_ptr = tc_alloc_pool(tc, size + TC_HDR_SIZE, 0); + + if (new_ptr == NULL) { + /* + * Couldn't allocate from pool (pool size + * counts as already allocated for memlimit + * purposes). 
We must check memory limit + * before any real malloc. + */ + if (tc->limit) { + /* + * Note we're doing an extra malloc, + * on top of the pool size, so account + * for size only, not the difference + * between old and new size. + */ + if (!talloc_memlimit_check(tc->limit, size)) { + _talloc_chunk_set_not_free(tc); + errno = ENOMEM; + return NULL; + } + } + new_ptr = malloc(TC_HDR_SIZE+size); + malloced = true; + new_size = size; + } + + if (new_ptr) { + memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE); + + _tc_free_poolmem(tc, __location__ "_talloc_realloc"); + } + } + else { + /* We're doing realloc here, so record the difference. */ + old_size = tc->size; + new_size = size; + /* + * We must check memory limit + * before any real realloc. + */ + if (tc->limit && (size > old_size)) { + if (!talloc_memlimit_check(tc->limit, + (size - old_size))) { + _talloc_chunk_set_not_free(tc); + errno = ENOMEM; + return NULL; + } + } + new_ptr = realloc(tc, size + TC_HDR_SIZE); + } +got_new_ptr: + + if (unlikely(!new_ptr)) { + /* + * Ok, this is a strange spot. We have to put back + * the old talloc_magic and any flags, except the + * TALLOC_FLAG_FREE as this was not free'ed by the + * realloc() call after all + */ + _talloc_chunk_set_not_free(tc); + return NULL; + } + + /* + * tc is now the new value from realloc(), the old memory we + * can't access any more and was preemptively marked as + * TALLOC_FLAG_FREE before the call. Now we mark it as not + * free again + */ + tc = (struct talloc_chunk *)new_ptr; + _talloc_chunk_set_not_free(tc); + if (malloced) { + tc->flags &= ~TALLOC_FLAG_POOLMEM; + } + if (tc->parent) { + tc->parent->child = tc; + } + if (tc->child) { + tc->child->parent = tc; + } + + if (tc->prev) { + tc->prev->next = tc; + } + if (tc->next) { + tc->next->prev = tc; + } + + if (new_size > old_size) { + talloc_memlimit_grow(tc->limit, new_size - old_size); + } else if (new_size < old_size) { + talloc_memlimit_shrink(tc->limit, old_size - new_size); + } + + tc->size = size; + _tc_set_name_const(tc, name); + + return TC_PTR_FROM_CHUNK(tc); +} + +/* + a wrapper around talloc_steal() for situations where you are moving a pointer + between two structures, and want the old pointer to be set to NULL +*/ +_PUBLIC_ void *_talloc_move(const void *new_ctx, const void *_pptr) +{ + const void **pptr = discard_const_p(const void *,_pptr); + void *ret = talloc_steal(new_ctx, discard_const_p(void, *pptr)); + (*pptr) = NULL; + return ret; +} + +enum talloc_mem_count_type { + TOTAL_MEM_SIZE, + TOTAL_MEM_BLOCKS, + TOTAL_MEM_LIMIT, +}; + +static inline size_t _talloc_total_mem_internal(const void *ptr, + enum talloc_mem_count_type type, + struct talloc_memlimit *old_limit, + struct talloc_memlimit *new_limit) +{ + size_t total = 0; + struct talloc_chunk *c, *tc; + + if (ptr == NULL) { + ptr = null_context; + } + if (ptr == NULL) { + return 0; + } + + tc = talloc_chunk_from_ptr(ptr); + + if (old_limit || new_limit) { + if (tc->limit && tc->limit->upper == old_limit) { + tc->limit->upper = new_limit; + } + } + + /* optimize in the memlimits case */ + if (type == TOTAL_MEM_LIMIT && + tc->limit != NULL && + tc->limit != old_limit && + tc->limit->parent == tc) { + return tc->limit->cur_size; + } + + if (tc->flags & TALLOC_FLAG_LOOP) { + return 0; + } + + tc->flags |= TALLOC_FLAG_LOOP; + + if (old_limit || new_limit) { + if (old_limit == tc->limit) { + tc->limit = new_limit; + } + } + + switch (type) { + case TOTAL_MEM_SIZE: + if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) { + total = tc->size; + } + 
break; + case TOTAL_MEM_BLOCKS: + total++; + break; + case TOTAL_MEM_LIMIT: + if (likely(tc->name != TALLOC_MAGIC_REFERENCE)) { + /* + * Don't count memory allocated from a pool + * when calculating limits. Only count the + * pool itself. + */ + if (!(tc->flags & TALLOC_FLAG_POOLMEM)) { + if (tc->flags & TALLOC_FLAG_POOL) { + /* + * If this is a pool, the allocated + * size is in the pool header, and + * remember to add in the prefix + * length. + */ + struct talloc_pool_hdr *pool_hdr + = talloc_pool_from_chunk(tc); + total = pool_hdr->poolsize + + TC_HDR_SIZE + + TP_HDR_SIZE; + } else { + total = tc->size + TC_HDR_SIZE; + } + } + } + break; + } + for (c = tc->child; c; c = c->next) { + total += _talloc_total_mem_internal(TC_PTR_FROM_CHUNK(c), type, + old_limit, new_limit); + } + + tc->flags &= ~TALLOC_FLAG_LOOP; + + return total; +} + +/* + return the total size of a talloc pool (subtree) +*/ +_PUBLIC_ size_t talloc_total_size(const void *ptr) +{ + return _talloc_total_mem_internal(ptr, TOTAL_MEM_SIZE, NULL, NULL); +} + +/* + return the total number of blocks in a talloc pool (subtree) +*/ +_PUBLIC_ size_t talloc_total_blocks(const void *ptr) +{ + return _talloc_total_mem_internal(ptr, TOTAL_MEM_BLOCKS, NULL, NULL); +} + +/* + return the number of external references to a pointer +*/ +_PUBLIC_ size_t talloc_reference_count(const void *ptr) +{ + struct talloc_chunk *tc = talloc_chunk_from_ptr(ptr); + struct talloc_reference_handle *h; + size_t ret = 0; + + for (h=tc->refs;h;h=h->next) { + ret++; + } + return ret; +} + +/* + report on memory usage by all children of a pointer, giving a full tree view +*/ +_PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth, + void (*callback)(const void *ptr, + int depth, int max_depth, + int is_ref, + void *private_data), + void *private_data) +{ + struct talloc_chunk *c, *tc; + + if (ptr == NULL) { + ptr = null_context; + } + if (ptr == NULL) return; + + tc = talloc_chunk_from_ptr(ptr); + + if (tc->flags & TALLOC_FLAG_LOOP) { + return; + } + + callback(ptr, depth, max_depth, 0, private_data); + + if (max_depth >= 0 && depth >= max_depth) { + return; + } + + tc->flags |= TALLOC_FLAG_LOOP; + for (c=tc->child;c;c=c->next) { + if (c->name == TALLOC_MAGIC_REFERENCE) { + struct talloc_reference_handle *h = (struct talloc_reference_handle *)TC_PTR_FROM_CHUNK(c); + callback(h->ptr, depth + 1, max_depth, 1, private_data); + } else { + talloc_report_depth_cb(TC_PTR_FROM_CHUNK(c), depth + 1, max_depth, callback, private_data); + } + } + tc->flags &= ~TALLOC_FLAG_LOOP; +} + +static void talloc_report_depth_FILE_helper(const void *ptr, int depth, int max_depth, int is_ref, void *_f) +{ + const char *name = __talloc_get_name(ptr); + struct talloc_chunk *tc; + FILE *f = (FILE *)_f; + + if (is_ref) { + fprintf(f, "%*sreference to: %s\n", depth*4, "", name); + return; + } + + tc = talloc_chunk_from_ptr(ptr); + if (tc->limit && tc->limit->parent == tc) { + fprintf(f, "%*s%-30s is a memlimit context" + " (max_size = %lu bytes, cur_size = %lu bytes)\n", + depth*4, "", + name, + (unsigned long)tc->limit->max_size, + (unsigned long)tc->limit->cur_size); + } + + if (depth == 0) { + fprintf(f,"%stalloc report on '%s' (total %6lu bytes in %3lu blocks)\n", + (max_depth < 0 ? 
"full " :""), name, + (unsigned long)talloc_total_size(ptr), + (unsigned long)talloc_total_blocks(ptr)); + return; + } + + fprintf(f, "%*s%-30s contains %6lu bytes in %3lu blocks (ref %d) %p\n", + depth*4, "", + name, + (unsigned long)talloc_total_size(ptr), + (unsigned long)talloc_total_blocks(ptr), + (int)talloc_reference_count(ptr), ptr); + +#if 0 + fprintf(f, "content: "); + if (talloc_total_size(ptr)) { + int tot = talloc_total_size(ptr); + int i; + + for (i = 0; i < tot; i++) { + if ((((char *)ptr)[i] > 31) && (((char *)ptr)[i] < 126)) { + fprintf(f, "%c", ((char *)ptr)[i]); + } else { + fprintf(f, "~%02x", ((char *)ptr)[i]); + } + } + } + fprintf(f, "\n"); +#endif +} + +/* + report on memory usage by all children of a pointer, giving a full tree view +*/ +_PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f) +{ + if (f) { + talloc_report_depth_cb(ptr, depth, max_depth, talloc_report_depth_FILE_helper, f); + fflush(f); + } +} + +/* + report on memory usage by all children of a pointer, giving a full tree view +*/ +_PUBLIC_ void talloc_report_full(const void *ptr, FILE *f) +{ + talloc_report_depth_file(ptr, 0, -1, f); +} + +/* + report on memory usage by all children of a pointer +*/ +_PUBLIC_ void talloc_report(const void *ptr, FILE *f) +{ + talloc_report_depth_file(ptr, 0, 1, f); +} + +/* + enable tracking of the NULL context +*/ +_PUBLIC_ void talloc_enable_null_tracking(void) +{ + if (null_context == NULL) { + null_context = _talloc_named_const(NULL, 0, "null_context"); + if (autofree_context != NULL) { + talloc_reparent(NULL, null_context, autofree_context); + } + } +} + +/* + enable tracking of the NULL context, not moving the autofree context + into the NULL context. This is needed for the talloc testsuite +*/ +_PUBLIC_ void talloc_enable_null_tracking_no_autofree(void) +{ + if (null_context == NULL) { + null_context = _talloc_named_const(NULL, 0, "null_context"); + } +} + +/* + disable tracking of the NULL context +*/ +_PUBLIC_ void talloc_disable_null_tracking(void) +{ + if (null_context != NULL) { + /* we have to move any children onto the real NULL + context */ + struct talloc_chunk *tc, *tc2; + tc = talloc_chunk_from_ptr(null_context); + for (tc2 = tc->child; tc2; tc2=tc2->next) { + if (tc2->parent == tc) tc2->parent = NULL; + if (tc2->prev == tc) tc2->prev = NULL; + } + for (tc2 = tc->next; tc2; tc2=tc2->next) { + if (tc2->parent == tc) tc2->parent = NULL; + if (tc2->prev == tc) tc2->prev = NULL; + } + tc->child = NULL; + tc->next = NULL; + } + talloc_free(null_context); + null_context = NULL; +} + +/* + enable leak reporting on exit +*/ +_PUBLIC_ void talloc_enable_leak_report(void) +{ + talloc_enable_null_tracking(); + talloc_report_null = true; + talloc_setup_atexit(); +} + +/* + enable full leak reporting on exit +*/ +_PUBLIC_ void talloc_enable_leak_report_full(void) +{ + talloc_enable_null_tracking(); + talloc_report_null_full = true; + talloc_setup_atexit(); +} + +/* + talloc and zero memory. +*/ +_PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name) +{ + void *p = _talloc_named_const(ctx, size, name); + + if (p) { + memset(p, '\0', size); + } + + return p; +} + +/* + memdup with a talloc. 
+*/ +_PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name) +{ + void *newp = NULL; + + if (likely(size > 0) && unlikely(p == NULL)) { + return NULL; + } + + newp = _talloc_named_const(t, size, name); + if (likely(newp != NULL) && likely(size > 0)) { + memcpy(newp, p, size); + } + + return newp; +} + +static inline char *__talloc_strlendup(const void *t, const char *p, size_t len) +{ + char *ret; + struct talloc_chunk *tc = NULL; + + ret = (char *)__talloc(t, len + 1, &tc); + if (unlikely(!ret)) return NULL; + + memcpy(ret, p, len); + ret[len] = 0; + + _tc_set_name_const(tc, ret); + return ret; +} + +/* + strdup with a talloc +*/ +_PUBLIC_ char *talloc_strdup(const void *t, const char *p) +{ + if (unlikely(!p)) return NULL; + return __talloc_strlendup(t, p, strlen(p)); +} + +/* + strndup with a talloc +*/ +_PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n) +{ + if (unlikely(!p)) return NULL; + return __talloc_strlendup(t, p, strnlen(p, n)); +} + +static inline char *__talloc_strlendup_append(char *s, size_t slen, + const char *a, size_t alen) +{ + char *ret; + + ret = talloc_realloc(NULL, s, char, slen + alen + 1); + if (unlikely(!ret)) return NULL; + + /* append the string and the trailing \0 */ + memcpy(&ret[slen], a, alen); + ret[slen+alen] = 0; + + _tc_set_name_const(talloc_chunk_from_ptr(ret), ret); + return ret; +} + +/* + * Appends at the end of the string. + */ +_PUBLIC_ char *talloc_strdup_append(char *s, const char *a) +{ + if (unlikely(!s)) { + return talloc_strdup(NULL, a); + } + + if (unlikely(!a)) { + return s; + } + + return __talloc_strlendup_append(s, strlen(s), a, strlen(a)); +} + +/* + * Appends at the end of the talloc'ed buffer, + * not the end of the string. + */ +_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a) +{ + size_t slen; + + if (unlikely(!s)) { + return talloc_strdup(NULL, a); + } + + if (unlikely(!a)) { + return s; + } + + slen = talloc_get_size(s); + if (likely(slen > 0)) { + slen--; + } + + return __talloc_strlendup_append(s, slen, a, strlen(a)); +} + +/* + * Appends at the end of the string. + */ +_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n) +{ + if (unlikely(!s)) { + return talloc_strndup(NULL, a, n); + } + + if (unlikely(!a)) { + return s; + } + + return __talloc_strlendup_append(s, strlen(s), a, strnlen(a, n)); +} + +/* + * Appends at the end of the talloc'ed buffer, + * not the end of the string. 
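+ * + * A short sketch of the difference (editor's note; hypothetical values). + * The buffer variant appends at talloc_get_size(s) - 1 rather than at + * the first NUL: + * + * char *s = talloc_strdup(NULL, "foo"); // chunk size 4: "foo\0" + * s = talloc_strndup_append_buffer(s, "bar", 3); // "foobar"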
+ */ +_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n) +{ + size_t slen; + + if (unlikely(!s)) { + return talloc_strndup(NULL, a, n); + } + + if (unlikely(!a)) { + return s; + } + + slen = talloc_get_size(s); + if (likely(slen > 0)) { + slen--; + } + + return __talloc_strlendup_append(s, slen, a, strnlen(a, n)); +} + +#ifndef HAVE_VA_COPY +#ifdef HAVE___VA_COPY +#define va_copy(dest, src) __va_copy(dest, src) +#else +#define va_copy(dest, src) (dest) = (src) +#endif +#endif + +static struct talloc_chunk *_vasprintf_tc(const void *t, + const char *fmt, + va_list ap) PRINTF_ATTRIBUTE(2,0); + +static struct talloc_chunk *_vasprintf_tc(const void *t, + const char *fmt, + va_list ap) +{ + int vlen; + size_t len; + char *ret; + va_list ap2; + struct talloc_chunk *tc = NULL; + char buf[1024]; + + va_copy(ap2, ap); + vlen = vsnprintf(buf, sizeof(buf), fmt, ap2); + va_end(ap2); + if (unlikely(vlen < 0)) { + return NULL; + } + len = vlen; + if (unlikely(len + 1 < len)) { + return NULL; + } + + ret = (char *)__talloc(t, len+1, &tc); + if (unlikely(!ret)) return NULL; + + if (len < sizeof(buf)) { + memcpy(ret, buf, len+1); + } else { + va_copy(ap2, ap); + vsnprintf(ret, len+1, fmt, ap2); + va_end(ap2); + } + + _tc_set_name_const(tc, ret); + return tc; +} + +_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap) +{ + struct talloc_chunk *tc = _vasprintf_tc(t, fmt, ap); + if (tc == NULL) { + return NULL; + } + return TC_PTR_FROM_CHUNK(tc); +} + + +/* + Perform string formatting, and return a pointer to newly allocated + memory holding the result, inside a memory pool. + */ +_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...) +{ + va_list ap; + char *ret; + + va_start(ap, fmt); + ret = talloc_vasprintf(t, fmt, ap); + va_end(ap); + return ret; +} + +static inline char *__talloc_vaslenprintf_append(char *s, size_t slen, + const char *fmt, va_list ap) + PRINTF_ATTRIBUTE(3,0); + +static inline char *__talloc_vaslenprintf_append(char *s, size_t slen, + const char *fmt, va_list ap) +{ + ssize_t alen; + va_list ap2; + char c; + + va_copy(ap2, ap); + /* this call looks strange, but it makes it work on older solaris boxes */ + alen = vsnprintf(&c, 1, fmt, ap2); + va_end(ap2); + + if (alen <= 0) { + /* Either the vsnprintf failed or the format resulted in + * no characters being formatted. In the former case, we + * ought to return NULL, in the latter we ought to return + * the original string. Most current callers of this + * function expect it to never return NULL. + */ + return s; + } + + s = talloc_realloc(NULL, s, char, slen + alen + 1); + if (!s) return NULL; + + vsnprintf(s + slen, alen + 1, fmt, ap); + + _tc_set_name_const(talloc_chunk_from_ptr(s), s); + return s; +} + +/** + * Realloc @p s to append the formatted result of @p fmt and @p ap, + * and return @p s, which may have moved. Good for gradually + * accumulating output into a string buffer. Appends at the end + * of the string. + **/ +_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap) +{ + if (unlikely(!s)) { + return talloc_vasprintf(NULL, fmt, ap); + } + + return __talloc_vaslenprintf_append(s, strlen(s), fmt, ap); +} + +/** + * Realloc @p s to append the formatted result of @p fmt and @p ap, + * and return @p s, which may have moved. Always appends at the + * end of the talloc'ed buffer, not the end of the string. 
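+ * + * A common accumulation pattern built on the _buffer variants (an + * editor's sketch; values and n are hypothetical): + * + * char *out = talloc_strdup(NULL, ""); + * size_t i; + * for (i = 0; i < n; i++) { + * out = talloc_asprintf_append_buffer(out, "%d\n", values[i]); + * }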
+*/ +_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap) +{ + size_t slen; + + if (unlikely(!s)) { + return talloc_vasprintf(NULL, fmt, ap); + } + + slen = talloc_get_size(s); + if (likely(slen > 0)) { + slen--; + } + + return __talloc_vaslenprintf_append(s, slen, fmt, ap); +} + +/* + Realloc @p s to append the formatted result of @p fmt and return @p + s, which may have moved. Good for gradually accumulating output + into a string buffer. + */ +_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + s = talloc_vasprintf_append(s, fmt, ap); + va_end(ap); + return s; +} + +/* + Realloc @p s to append the formatted result of @p fmt and return @p + s, which may have moved. Good for gradually accumulating output + into a buffer. + */ +_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + s = talloc_vasprintf_append_buffer(s, fmt, ap); + va_end(ap); + return s; +} + +_PUBLIC_ void talloc_asprintf_addbuf(char **ps, const char *fmt, ...) +{ + va_list ap; + char *s = *ps; + char *t = NULL; + + if (s == NULL) { + return; + } + + va_start(ap, fmt); + t = talloc_vasprintf_append_buffer(s, fmt, ap); + va_end(ap); + + if (t == NULL) { + /* signal failure to the next caller */ + TALLOC_FREE(s); + *ps = NULL; + } else { + *ps = t; + } +} + +/* + alloc an array, checking for integer overflow in the array size +*/ +_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name) +{ + if (count >= MAX_TALLOC_SIZE/el_size) { + return NULL; + } + return _talloc_named_const(ctx, el_size * count, name); +} + +/* + alloc a zeroed array, checking for integer overflow in the array size +*/ +_PUBLIC_ void *_talloc_zero_array(const void *ctx, size_t el_size, unsigned count, const char *name) +{ + if (count >= MAX_TALLOC_SIZE/el_size) { + return NULL; + } + return _talloc_zero(ctx, el_size * count, name); +} + +/* + realloc an array, checking for integer overflow in the array size +*/ +_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name) +{ + if (count >= MAX_TALLOC_SIZE/el_size) { + return NULL; + } + return _talloc_realloc(ctx, ptr, el_size * count, name); +} + +/* + a function version of talloc_realloc(), so it can be passed as a function pointer + to libraries that want a realloc function (a realloc function encapsulates + all the basic capabilities of an allocation library, which is why this is useful) +*/ +_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size) +{ + return _talloc_realloc(context, ptr, size, NULL); +} + + +static int talloc_autofree_destructor(void *ptr) +{ + autofree_context = NULL; + return 0; +} + +/* + return a context which will be auto-freed on exit + this is useful for reducing the noise in leak reports +*/ +_PUBLIC_ void *talloc_autofree_context(void) +{ + if (autofree_context == NULL) { + autofree_context = _talloc_named_const(NULL, 0, "autofree_context"); + talloc_set_destructor(autofree_context, talloc_autofree_destructor); + talloc_setup_atexit(); + } + return autofree_context; +} + +_PUBLIC_ size_t talloc_get_size(const void *context) +{ + struct talloc_chunk *tc; + + if (context == NULL) { + return 0; + } + + tc = talloc_chunk_from_ptr(context); + + return tc->size; +} + +/* + find a parent of this context that has the given name, if any +*/ +_PUBLIC_ void *talloc_find_parent_byname(const void *context, const char
*name) +{ + struct talloc_chunk *tc; + + if (context == NULL) { + return NULL; + } + + tc = talloc_chunk_from_ptr(context); + while (tc) { + if (tc->name && strcmp(tc->name, name) == 0) { + return TC_PTR_FROM_CHUNK(tc); + } + while (tc && tc->prev) tc = tc->prev; + if (tc) { + tc = tc->parent; + } + } + return NULL; +} + +/* + show the parentage of a context +*/ +_PUBLIC_ void talloc_show_parents(const void *context, FILE *file) +{ + struct talloc_chunk *tc; + + if (context == NULL) { + fprintf(file, "talloc no parents for NULL\n"); + return; + } + + tc = talloc_chunk_from_ptr(context); + fprintf(file, "talloc parents of '%s'\n", __talloc_get_name(context)); + while (tc) { + fprintf(file, "\t'%s'\n", __talloc_get_name(TC_PTR_FROM_CHUNK(tc))); + while (tc && tc->prev) tc = tc->prev; + if (tc) { + tc = tc->parent; + } + } + fflush(file); +} + +/* + return 1 if ptr is a parent of context +*/ +static int _talloc_is_parent(const void *context, const void *ptr, int depth) +{ + struct talloc_chunk *tc; + + if (context == NULL) { + return 0; + } + + tc = talloc_chunk_from_ptr(context); + while (tc) { + if (depth <= 0) { + return 0; + } + if (TC_PTR_FROM_CHUNK(tc) == ptr) return 1; + while (tc && tc->prev) tc = tc->prev; + if (tc) { + tc = tc->parent; + depth--; + } + } + return 0; +} + +/* + return 1 if ptr is a parent of context +*/ +_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr) +{ + return _talloc_is_parent(context, ptr, TALLOC_MAX_DEPTH); +} + +/* + return the total size of memory used by this context and all children +*/ +static inline size_t _talloc_total_limit_size(const void *ptr, + struct talloc_memlimit *old_limit, + struct talloc_memlimit *new_limit) +{ + return _talloc_total_mem_internal(ptr, TOTAL_MEM_LIMIT, + old_limit, new_limit); +} + +static inline bool talloc_memlimit_check(struct talloc_memlimit *limit, size_t size) +{ + struct talloc_memlimit *l; + + for (l = limit; l != NULL; l = l->upper) { + if (l->max_size != 0 && + ((l->max_size <= l->cur_size) || + (l->max_size - l->cur_size < size))) { + return false; + } + } + + return true; +} + +/* + Update memory limits when freeing a talloc_chunk. +*/ +static void tc_memlimit_update_on_free(struct talloc_chunk *tc) +{ + size_t limit_shrink_size; + + if (!tc->limit) { + return; + } + + /* + * Pool entries don't count. Only the pools + * themselves are counted as part of the memory + * limits. Note that this also takes care of + * nested pools which have both flags + * TALLOC_FLAG_POOLMEM|TALLOC_FLAG_POOL set. + */ + if (tc->flags & TALLOC_FLAG_POOLMEM) { + return; + } + + /* + * If we are part of a memory limited context hierarchy + * we need to subtract the memory used from the counters + */ + + limit_shrink_size = tc->size+TC_HDR_SIZE; + + /* + * If we're deallocating a pool, take into + * account the prefix size added for the pool. + */ + + if (tc->flags & TALLOC_FLAG_POOL) { + limit_shrink_size += TP_HDR_SIZE; + } + + talloc_memlimit_shrink(tc->limit, limit_shrink_size); + + if (tc->limit->parent == tc) { + free(tc->limit); + } + + tc->limit = NULL; +} + +/* + Increase memory limit accounting after a malloc/realloc. 
+*/ +static void talloc_memlimit_grow(struct talloc_memlimit *limit, + size_t size) +{ + struct talloc_memlimit *l; + + for (l = limit; l != NULL; l = l->upper) { + size_t new_cur_size = l->cur_size + size; + if (new_cur_size < l->cur_size) { + talloc_abort("logic error in talloc_memlimit_grow\n"); + return; + } + l->cur_size = new_cur_size; + } +} + +/* + Decrease memory limit accounting after a free/realloc. +*/ +static void talloc_memlimit_shrink(struct talloc_memlimit *limit, + size_t size) +{ + struct talloc_memlimit *l; + + for (l = limit; l != NULL; l = l->upper) { + if (l->cur_size < size) { + talloc_abort("logic error in talloc_memlimit_shrink\n"); + return; + } + l->cur_size = l->cur_size - size; + } +} + +_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size) +{ + struct talloc_chunk *tc = talloc_chunk_from_ptr(ctx); + struct talloc_memlimit *orig_limit; + struct talloc_memlimit *limit = NULL; + + if (tc->limit && tc->limit->parent == tc) { + tc->limit->max_size = max_size; + return 0; + } + orig_limit = tc->limit; + + limit = malloc(sizeof(struct talloc_memlimit)); + if (limit == NULL) { + return 1; + } + limit->parent = tc; + limit->max_size = max_size; + limit->cur_size = _talloc_total_limit_size(ctx, tc->limit, limit); + + if (orig_limit) { + limit->upper = orig_limit; + } else { + limit->upper = NULL; + } + + return 0; +} diff --git a/talloc.h b/talloc.h new file mode 100644 index 0000000..eef1a70 --- /dev/null +++ b/talloc.h @@ -0,0 +1,1972 @@ +#ifndef _TALLOC_H_ +#define _TALLOC_H_ +/* + Unix SMB/CIFS implementation. + Samba temporary memory allocation functions + + Copyright (C) Andrew Tridgell 2004-2005 + Copyright (C) Stefan Metzmacher 2006 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. +*/ + +#include <stdlib.h> +#include <stdio.h> +#include <stdarg.h> + +#ifdef __cplusplus +extern "C" { +#endif + +/* for old gcc releases that don't have the feature test macro __has_attribute */ +#ifndef __has_attribute +#define __has_attribute(x) 0 +#endif + +#ifndef _PUBLIC_ +#if __has_attribute(visibility) +#define _PUBLIC_ __attribute__((visibility("default"))) +#else +#define _PUBLIC_ +#endif +#endif + +/** + * @defgroup talloc The talloc API + * + * talloc is a hierarchical, reference counted memory pool system with + * destructors. It is the core memory allocator used in Samba. + * + * @{ + */ + +#define TALLOC_VERSION_MAJOR 2 +#define TALLOC_VERSION_MINOR 4 + +_PUBLIC_ int talloc_version_major(void); +_PUBLIC_ int talloc_version_minor(void); +/* This is mostly useful only for testing */ +_PUBLIC_ int talloc_test_get_magic(void); + +/** + * @brief Define a talloc parent type + * + * As talloc is a hierarchical memory allocator, every talloc chunk is a + * potential parent to other talloc chunks.
So defining a separate type for a + * talloc chunk is not strictly necessary. TALLOC_CTX is defined nevertheless, + * as it provides an indicator for function arguments. You will frequently + * write code like + * + * @code + * struct foo *foo_create(TALLOC_CTX *mem_ctx) + * { + * struct foo *result; + * result = talloc(mem_ctx, struct foo); + * if (result == NULL) return NULL; + * ... initialize foo ... + * return result; + * } + * @endcode + * + * In this kind of allocating function it is handy to have a general + * TALLOC_CTX type to indicate which parent to put allocated structures on. + */ +typedef void TALLOC_CTX; + +/* + this uses a little trick to allow __LINE__ to be stringified +*/ +#ifndef __location__ +#define __TALLOC_STRING_LINE1__(s) #s +#define __TALLOC_STRING_LINE2__(s) __TALLOC_STRING_LINE1__(s) +#define __TALLOC_STRING_LINE3__ __TALLOC_STRING_LINE2__(__LINE__) +#define __location__ __FILE__ ":" __TALLOC_STRING_LINE3__ +#endif + +#ifndef TALLOC_DEPRECATED +#define TALLOC_DEPRECATED 0 +#endif + +#ifndef PRINTF_ATTRIBUTE +#if __has_attribute(format) || (__GNUC__ >= 3) +/** Use gcc attribute to check printf fns. a1 is the 1-based index of + * the parameter containing the format, and a2 the index of the first + * argument. Note that some gcc 2.x versions don't handle this + * properly **/ +#define PRINTF_ATTRIBUTE(a1, a2) __attribute__ ((format (__printf__, a1, a2))) +#else +#define PRINTF_ATTRIBUTE(a1, a2) +#endif +#endif + +#ifndef _DEPRECATED_ +#if __has_attribute(deprecated) || (__GNUC__ >= 3) +#define _DEPRECATED_ __attribute__ ((deprecated)) +#else +#define _DEPRECATED_ +#endif +#endif +#ifdef DOXYGEN + +/** + * @brief Create a new talloc context. + * + * The talloc() macro is the core of the talloc library. It takes a memory + * context and a type, and returns a pointer to a new area of memory of the + * given type. + * + * The returned pointer is itself a talloc context, so you can use it as the + * context argument to more calls to talloc if you wish. + * + * The returned pointer is a "child" of the supplied context. This means that if + * you talloc_free() the context then the new child disappears as well. + * Alternatively you can free just the child. + * + * @param[in] ctx A talloc context to create a new reference on or NULL to + * create a new top level context. + * + * @param[in] type The type of memory to allocate. + * + * @return A type casted talloc context or NULL on error. + * + * @code + * unsigned int *a, *b; + * + * a = talloc(NULL, unsigned int); + * b = talloc(a, unsigned int); + * @endcode + * + * @see talloc_zero + * @see talloc_array + * @see talloc_steal + * @see talloc_free + */ +_PUBLIC_ void *talloc(const void *ctx, #type); +#else +#define talloc(ctx, type) (type *)talloc_named_const(ctx, sizeof(type), #type) +_PUBLIC_ void *_talloc(const void *context, size_t size); +#endif + +/** + * @brief Create a new top level talloc context. + * + * This function creates a zero length named talloc context as a top level + * context. It is equivalent to: + * + * @code + * talloc_named(NULL, 0, fmt, ...); + * @endcode + * @param[in] fmt Format string for the name. + * + * @param[in] ... Additional printf-style arguments. + * + * @return The allocated memory chunk, NULL on error. + * + * @see talloc_named() + */ +_PUBLIC_ void *talloc_init(const char *fmt, ...) PRINTF_ATTRIBUTE(1,2); + +#ifdef DOXYGEN +/** + * @brief Free a chunk of talloc memory. + * + * The talloc_free() function frees a piece of talloc memory, and all its + * children.
You can call talloc_free() on any pointer returned by + * talloc(). + * + * The return value of talloc_free() indicates success or failure, with 0 + * returned for success and -1 for failure. A possible failure condition + * is if the pointer had a destructor attached to it and the destructor + * returned -1. See talloc_set_destructor() for details on + * destructors. Likewise, if "ptr" is NULL, then the function will make + * no modifications and return -1. + * + * From version 2.0 and onwards, as a special case, talloc_free() is + * refused on pointers that have more than one parent associated, as talloc + * would have no way of knowing which parent should be removed. This is + * different from older versions, which always destroyed the reference to + * the most recently established parent. Hence, to free a + * pointer that has more than one parent, please use talloc_unlink(). + * + * To help you find problems in your code caused by this behaviour, if + * you do try to free a pointer with more than one parent then the + * talloc logging function will be called to give output like this: + * + * @code + * ERROR: talloc_free with references at some_dir/source/foo.c:123 + * reference at some_dir/source/other.c:325 + * reference at some_dir/source/third.c:121 + * @endcode + * + * Please see the documentation for talloc_set_log_fn() and + * talloc_set_log_stderr() for more information on talloc logging + * functions. + * + * If the <code>TALLOC_FREE_FILL</code> environment variable is set, + * the memory occupied by the context is filled with the value of this variable. + * The value should be a numeric representation of the character you want to + * use. + * + * talloc_free() operates recursively on its children. + * + * @param[in] ptr The chunk to be freed. + * + * @return Returns 0 on success and -1 on error. A possible + * failure condition is if the pointer had a destructor + * attached to it and the destructor returned -1. Likewise, + * if "ptr" is NULL, then the function will make no + * modifications and return -1. + * + * Example: + * @code + * unsigned int *a, *b; + * a = talloc(NULL, unsigned int); + * b = talloc(a, unsigned int); + * + * talloc_free(a); // Frees a and b + * @endcode + * + * @see talloc_set_destructor() + * @see talloc_unlink() + */ +_PUBLIC_ int talloc_free(void *ptr); +#else +#define talloc_free(ctx) _talloc_free(ctx, __location__) +_PUBLIC_ int _talloc_free(void *ptr, const char *location); +#endif + +/** + * @brief Free a talloc chunk's children. + * + * The function walks along the list of all children of a talloc context and + * talloc_free()s only the children, not the context itself. + * + * A NULL argument is handled as a no-op. + * + * @param[in] ptr The chunk that you want to free the children of + * (NULL is allowed too) + */ +_PUBLIC_ void talloc_free_children(void *ptr); + +#ifdef DOXYGEN +/** + * @brief Assign a destructor function to be called when a chunk is freed. + * + * The function talloc_set_destructor() sets the "destructor" for the pointer + * "ptr". A destructor is a function that is called when the memory used by a + * pointer is about to be released. The destructor receives the pointer as an + * argument, and should return 0 for success and -1 for failure. + * + * The destructor can do anything it wants to, including freeing other pieces + * of memory. A common use for destructors is to clean up operating system + * resources (such as open file descriptors) contained in the structure the + * destructor is placed on.
+ * + * You can only place one destructor on a pointer. If you need more than one + * destructor then you can create a zero-length child of the pointer and place + * an additional destructor on that. + * + * To remove a destructor, call talloc_set_destructor() with NULL for the + * destructor. + * + * If your destructor attempts to talloc_free() the pointer that it is the + * destructor for then talloc_free() will return -1 and the free will be + * ignored. This would be a pointless operation anyway, as the destructor is + * only called when the memory is just about to go away. + * + * @param[in] ptr The talloc chunk to add a destructor to. + * + * @param[in] destructor The destructor function to be called. NULL to remove + * it. + * + * Example: + * @code + * static int destroy_fd(int *fd) { + * close(*fd); + * return 0; + * } + * + * int *open_file(const char *filename) { + * int *fd = talloc(NULL, int); + * *fd = open(filename, O_RDONLY); + * if (*fd < 0) { + * talloc_free(fd); + * return NULL; + * } + * // Whenever they free this, we close the file. + * talloc_set_destructor(fd, destroy_fd); + * return fd; + * } + * @endcode + * + * @see talloc() + * @see talloc_free() + */ +_PUBLIC_ void talloc_set_destructor(const void *ptr, int (*destructor)(void *)); + +/** + * @brief Change a talloc chunk's parent. + * + * The talloc_steal() function changes the parent context of a talloc + * pointer. It is typically used when the context that the pointer is + * currently a child of is going to be freed and you wish to keep the + * memory for a longer time. + * + * To make the changed hierarchy less error-prone, you might consider using + * talloc_move(). + * + * If you try to call talloc_steal() on a pointer that has more than one + * parent then the result is ambiguous. Talloc will choose to remove the + * parent that is currently indicated by talloc_parent() and replace it with + * the chosen parent. You will also get a message like this via the talloc + * logging functions: + * + * @code + * WARNING: talloc_steal with references at some_dir/source/foo.c:123 + * reference at some_dir/source/other.c:325 + * reference at some_dir/source/third.c:121 + * @endcode + * + * To unambiguously change the parent of a pointer, please see the function + * talloc_reparent(). See the talloc_set_log_fn() documentation for more + * information on talloc logging. + * + * @param[in] new_ctx The new parent context. + * + * @param[in] ptr The talloc chunk to move. + * + * @return Returns the pointer that you pass it. It does not have + * any failure modes. + * + * @note It is possible to produce loops in the parent/child relationship + * if you are not careful with talloc_steal(). No guarantees are provided + * as to your sanity or the safety of your data if you do this.
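+ * + * Example (a minimal sketch; long_lived_ctx stands for any context that + * outlives tmp_ctx): + * @code + * TALLOC_CTX *tmp_ctx = talloc_new(NULL); + * char *s = talloc_strdup(tmp_ctx, "keep me"); + * talloc_steal(long_lived_ctx, s); + * talloc_free(tmp_ctx); // s survives as a child of long_lived_ctx + * @endcode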
+ */ +_PUBLIC_ void *talloc_steal(const void *new_ctx, const void *ptr); +#else /* DOXYGEN */ +/* try to make talloc_set_destructor() and talloc_steal() type safe, + if we have a recent gcc */ +#if (__GNUC__ >= 3) +#define _TALLOC_TYPEOF(ptr) __typeof__(ptr) +#define talloc_set_destructor(ptr, function) \ + do { \ + int (*_talloc_destructor_fn)(_TALLOC_TYPEOF(ptr)) = (function); \ + _talloc_set_destructor((ptr), (int (*)(void *))_talloc_destructor_fn); \ + } while(0) +/* this extremely strange macro is to avoid some braindamaged warning + stupidity in gcc 4.1.x */ +#define talloc_steal(ctx, ptr) ({ _TALLOC_TYPEOF(ptr) __talloc_steal_ret = (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__); __talloc_steal_ret; }) +#else /* __GNUC__ >= 3 */ +#define talloc_set_destructor(ptr, function) \ + _talloc_set_destructor((ptr), (int (*)(void *))(function)) +#define _TALLOC_TYPEOF(ptr) void * +#define talloc_steal(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_steal_loc((ctx),(ptr), __location__) +#endif /* __GNUC__ >= 3 */ +_PUBLIC_ void _talloc_set_destructor(const void *ptr, int (*_destructor)(void *)); +_PUBLIC_ void *_talloc_steal_loc(const void *new_ctx, const void *ptr, const char *location); +#endif /* DOXYGEN */ + +/** + * @brief Assign a name to a talloc chunk. + * + * Each talloc pointer has a "name". The name is used principally for + * debugging purposes, although it is also possible to set and get the name on + * a pointer as a way of "marking" pointers in your code. + * + * The main use for names on pointers is for "talloc reports". See + * talloc_report() and talloc_report_full() for details. Also see + * talloc_enable_leak_report() and talloc_enable_leak_report_full(). + * + * The talloc_set_name() function allocates memory as a child of the + * pointer. It is logically equivalent to: + * + * @code + * talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...)); + * @endcode + * + * @param[in] ptr The talloc chunk to assign a name to. + * + * @param[in] fmt Format string for the name. + * + * @param[in] ... Additional printf-style arguments. + * + * @return The assigned name, NULL on error. + * + * @note Multiple calls to talloc_set_name() will allocate more memory without + * releasing the name. All of the memory is released when the ptr is freed + * using talloc_free(). + */ +_PUBLIC_ const char *talloc_set_name(const void *ptr, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3); + +#ifdef DOXYGEN +/** + * @brief Change a talloc chunk's parent. + * + * This function has the same effect as talloc_steal(), and additionally sets + * the source pointer to NULL. You would use it like this: + * + * @code + * struct foo *X = talloc(tmp_ctx, struct foo); + * struct foo *Y; + * Y = talloc_move(new_ctx, &X); + * @endcode + * + * @param[in] new_ctx The new parent context. + * + * @param[in] pptr Pointer to a pointer to the talloc chunk to move. + * + * @return The pointer to the talloc chunk that moved. + * It does not have any failure modes. + * + */ +_PUBLIC_ void *talloc_move(const void *new_ctx, void **pptr); +#else +#define talloc_move(ctx, pptr) (_TALLOC_TYPEOF(*(pptr)))_talloc_move((ctx),(void *)(pptr)) +_PUBLIC_ void *_talloc_move(const void *new_ctx, const void *pptr); +#endif + +/** + * @brief Assign a name to a talloc chunk. + * + * The function is just like talloc_set_name(), but it takes a string constant, + * and is much faster. It is extensively used by the "auto naming" macros, such + * as talloc_p(). + * + * This function does not allocate any memory.
It just copies the supplied + * pointer into the internal representation of the talloc ptr. This means you + * must not pass a name pointer to memory that will disappear before the ptr + * is freed with talloc_free(). + * + * @param[in] ptr The talloc chunk to assign a name to. + * + * @param[in] name Format string for the name. + */ +_PUBLIC_ void talloc_set_name_const(const void *ptr, const char *name); + +/** + * @brief Create a named talloc chunk. + * + * The talloc_named() function creates a named talloc pointer. It is + * equivalent to: + * + * @code + * ptr = talloc_size(context, size); + * talloc_set_name(ptr, fmt, ....); + * @endcode + * + * @param[in] context The talloc context to hang the result off. + * + * @param[in] size Number of char's that you want to allocate. + * + * @param[in] fmt Format string for the name. + * + * @param[in] ... Additional printf-style arguments. + * + * @return The allocated memory chunk, NULL on error. + * + * @see talloc_set_name() + */ +_PUBLIC_ void *talloc_named(const void *context, size_t size, + const char *fmt, ...) PRINTF_ATTRIBUTE(3,4); + +/** + * @brief Basic routine to allocate a chunk of memory. + * + * This is equivalent to: + * + * @code + * ptr = talloc_size(context, size); + * talloc_set_name_const(ptr, name); + * @endcode + * + * @param[in] context The parent context. + * + * @param[in] size The number of char's that we want to allocate. + * + * @param[in] name The name the talloc block has. + * + * @return The allocated memory chunk, NULL on error. + */ +_PUBLIC_ void *talloc_named_const(const void *context, size_t size, const char *name); + +#ifdef DOXYGEN +/** + * @brief Untyped allocation. + * + * The function should be used when you don't have a convenient type to pass to + * talloc(). Unlike talloc(), it is not type safe (as it returns a void *), so + * you are on your own for type checking. + * + * Best to use talloc() or talloc_array() instead. + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] size Number of char's that you want to allocate. + * + * @return The allocated memory chunk, NULL on error. + * + * Example: + * @code + * void *mem = talloc_size(NULL, 100); + * @endcode + */ +_PUBLIC_ void *talloc_size(const void *ctx, size_t size); +#else +#define talloc_size(ctx, size) talloc_named_const(ctx, size, __location__) +#endif + +#ifdef DOXYGEN +/** + * @brief Allocate into a typed pointer. + * + * The talloc_ptrtype() macro should be used when you have a pointer and want + * to allocate memory to point at with this pointer. When compiling with + * gcc >= 3 it is typesafe. Note this is a wrapper of talloc_size() and + * talloc_get_name() will return the current location in the source file and + * not the type. + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] type The pointer you want to assign the result to. + * + * @return The properly casted allocated memory chunk, NULL on + * error. + * + * Example: + * @code + * unsigned int *a = talloc_ptrtype(NULL, a); + * @endcode + */ +_PUBLIC_ void *talloc_ptrtype(const void *ctx, #type); +#else +#define talloc_ptrtype(ctx, ptr) (_TALLOC_TYPEOF(ptr))talloc_size(ctx, sizeof(*(ptr))) +#endif + +#ifdef DOXYGEN +/** + * @brief Allocate a new 0-sized talloc chunk. + * + * This is a utility macro that creates a new memory context hanging off an + * existing context, automatically naming it "talloc_new: __location__" where + * __location__ is the source line it is called from. 
It is particularly + * useful for creating a new temporary working context. + * + * @param[in] ctx The talloc parent context. + * + * @return A new talloc chunk, NULL on error. + */ +_PUBLIC_ void *talloc_new(const void *ctx); +#else +#define talloc_new(ctx) talloc_named_const(ctx, 0, "talloc_new: " __location__) +#endif + +#ifdef DOXYGEN +/** + * @brief Allocate a 0-initialized structure. + * + * The macro is equivalent to: + * + * @code + * ptr = talloc(ctx, type); + * if (ptr) memset(ptr, 0, sizeof(type)); + * @endcode + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] type The type that we want to allocate. + * + * @return Pointer to a piece of memory, properly cast to 'type *', + * NULL on error. + * + * Example: + * @code + * unsigned int *a, *b; + * a = talloc_zero(NULL, unsigned int); + * b = talloc_zero(a, unsigned int); + * @endcode + * + * @see talloc() + * @see talloc_zero_size() + * @see talloc_zero_array() + */ +_PUBLIC_ void *talloc_zero(const void *ctx, #type); + +/** + * @brief Allocate untyped, 0-initialized memory. + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] size Number of char's that you want to allocate. + * + * @return The allocated memory chunk. + */ +_PUBLIC_ void *talloc_zero_size(const void *ctx, size_t size); +#else +#define talloc_zero(ctx, type) (type *)_talloc_zero(ctx, sizeof(type), #type) +#define talloc_zero_size(ctx, size) _talloc_zero(ctx, size, __location__) +_PUBLIC_ void *_talloc_zero(const void *ctx, size_t size, const char *name); +#endif + +/** + * @brief Return the name of a talloc chunk. + * + * @param[in] ptr The talloc chunk. + * + * @return The current name for the given talloc pointer. + * + * @see talloc_set_name() + */ +_PUBLIC_ const char *talloc_get_name(const void *ptr); + +/** + * @brief Verify that a talloc chunk carries a specified name. + * + * This function checks if a pointer has the specified name. If it does + * then the pointer is returned. + * + * @param[in] ptr The talloc chunk to check. + * + * @param[in] name The name to check against. + * + * @return The pointer if the name matches, NULL if it doesn't. + */ +_PUBLIC_ void *talloc_check_name(const void *ptr, const char *name); + +/** + * @brief Get the parent chunk of a pointer. + * + * @param[in] ptr The talloc pointer to inspect. + * + * @return The talloc parent of ptr, NULL on error. + */ +_PUBLIC_ void *talloc_parent(const void *ptr); + +/** + * @brief Get a talloc chunk's parent name. + * + * @param[in] ptr The talloc pointer to inspect. + * + * @return The name of ptr's parent chunk. + */ +_PUBLIC_ const char *talloc_parent_name(const void *ptr); + +/** + * @brief Get the size of a talloc chunk. + * + * This function lets you know the amount of memory allocated so far by + * this context. It does NOT account for subcontext memory. + * This can be used to calculate the size of an array. + * + * @param[in] ctx The talloc chunk. + * + * @return The size of the talloc chunk. + */ +_PUBLIC_ size_t talloc_get_size(const void *ctx); + +/** + * @brief Get the total size of a talloc chunk including its children. + * + * The function returns the total size in bytes used by this pointer and all + * child pointers. Mostly useful for debugging. + * + * Passing NULL is allowed, but it will only give a meaningful result if + * talloc_enable_leak_report() or talloc_enable_leak_report_full() has + * been called. + * + * @param[in] ptr The talloc chunk. + * + * @return The total size.
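+ * + * Example (a minimal sketch): + * @code + * TALLOC_CTX *ctx = talloc_new(NULL); + * char *s = talloc_strdup(ctx, "hello"); + * // sums the sizes of ctx (zero-length) and s, i.e. 6 bytes here + * size_t total = talloc_total_size(ctx); + * @endcode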
+ */ +_PUBLIC_ size_t talloc_total_size(const void *ptr); + +/** + * @brief Get the number of talloc chunks hanging off a chunk. + * + * The talloc_total_blocks() function returns the total memory block + * count used by this pointer and all child pointers. Mostly useful for + * debugging. + * + * Passing NULL is allowed, but it will only give a meaningful result if + * talloc_enable_leak_report() or talloc_enable_leak_report_full() has + * been called. + * + * @param[in] ptr The talloc chunk. + * + * @return The total number of blocks. + */ +_PUBLIC_ size_t talloc_total_blocks(const void *ptr); + +#ifdef DOXYGEN +/** + * @brief Duplicate a memory area into a talloc chunk. + * + * The function is equivalent to: + * + * @code + * ptr = talloc_size(ctx, size); + * if (ptr) memcpy(ptr, p, size); + * @endcode + * + * @param[in] t The talloc context to hang the result off. + * + * @param[in] p The memory chunk you want to duplicate. + * + * @param[in] size Number of chars that you want to copy. + * + * @return The allocated memory chunk. + * + * @see talloc_size() + */ +_PUBLIC_ void *talloc_memdup(const void *t, const void *p, size_t size); +#else +#define talloc_memdup(t, p, size) _talloc_memdup(t, p, size, __location__) +_PUBLIC_ void *_talloc_memdup(const void *t, const void *p, size_t size, const char *name); +#endif + +#ifdef DOXYGEN +/** + * @brief Assign a type to a talloc chunk. + * + * This macro allows you to force the name of a pointer to be of a particular + * type. This can be used in conjunction with talloc_get_type() to do type + * checking on void* pointers. + * + * It is equivalent to this: + * + * @code + * talloc_set_name_const(ptr, #type) + * @endcode + * + * @param[in] ptr The talloc chunk to assign the type to. + * + * @param[in] type The type to assign. + */ +_PUBLIC_ void talloc_set_type(const char *ptr, #type); + +/** + * @brief Get a typed pointer out of a talloc pointer. + * + * This macro allows you to do type checking on talloc pointers. It is + * particularly useful for void* private pointers. It is equivalent to + * this: + * + * @code + * (type *)talloc_check_name(ptr, #type) + * @endcode + * + * @param[in] ptr The talloc pointer to check. + * + * @param[in] type The type to check against. + * + * @return The properly casted pointer given by ptr, NULL on error. + */ +type *talloc_get_type(const void *ptr, #type); +#else +#define talloc_set_type(ptr, type) talloc_set_name_const(ptr, #type) +#define talloc_get_type(ptr, type) (type *)talloc_check_name(ptr, #type) +#endif + +#ifdef DOXYGEN +/** + * @brief Safely turn a void pointer into a typed pointer. + * + * This macro is used together with talloc(mem_ctx, struct foo). If you had to + * assign the talloc chunk pointer to some void pointer variable, + * talloc_get_type_abort() is the recommended way to convert the void + * pointer back to a typed pointer. + * + * @param[in] ptr The void pointer to convert. + * + * @param[in] type The type that this chunk contains. + * + * @return The same value as ptr, type-checked and properly cast. + */ +_PUBLIC_ void *talloc_get_type_abort(const void *ptr, #type); +#else +#ifdef TALLOC_GET_TYPE_ABORT_NOOP +#define talloc_get_type_abort(ptr, type) (type *)(ptr) +#else +#define talloc_get_type_abort(ptr, type) (type *)_talloc_get_type_abort(ptr, #type, __location__) +#endif +_PUBLIC_ void *_talloc_get_type_abort(const void *ptr, const char *name, const char *location); +#endif + +/** + * @brief Find a parent context by name.
+ * + * Find a parent memory context of the current context that has the given + * name. This can be very useful in complex programs where it may be + * difficult to pass all information down to the level you need, but you + * know the structure you want is a parent of another context. + * + * @param[in] ctx The talloc chunk to start from. + * + * @param[in] name The name of the parent we look for. + * + * @return The memory context we are looking for, NULL if not + * found. + */ +_PUBLIC_ void *talloc_find_parent_byname(const void *ctx, const char *name); + +#ifdef DOXYGEN +/** + * @brief Find a parent context by type. + * + * Find a parent memory context of the current context that is of the given + * type. This can be very useful in complex programs where it may be + * difficult to pass all information down to the level you need, but you + * know the structure you want is a parent of another context. + * + * Like talloc_find_parent_byname() but takes a type, making it typesafe. + * + * @param[in] ptr The talloc chunk to start from. + * + * @param[in] type The type of the parent to look for. + * + * @return The memory context we are looking for, NULL if not + * found. + */ +_PUBLIC_ void *talloc_find_parent_bytype(const void *ptr, #type); +#else +#define talloc_find_parent_bytype(ptr, type) (type *)talloc_find_parent_byname(ptr, #type) +#endif + +/** + * @brief Allocate a talloc pool. + * + * A talloc pool is a pure optimization for specific situations. In the + * release process for Samba 3.2 we found out that we had become considerably + * slower than Samba 3.0 was. Profiling showed that malloc(3) was a large CPU + * consumer in benchmarks. For Samba 3.2 we have internally converted many + * static buffers to dynamically allocated ones, so it was no surprise that + * malloc(3) was being hit even more. But it made us slower. + * + * talloc_pool() is an optimization to call malloc(3) a lot less for the use + * pattern Samba has: The SMB protocol is mainly a request/response protocol + * where we have to allocate a certain amount of memory per request and free + * that after the SMB reply is sent to the client. + * + * talloc_pool() creates a talloc chunk that you can use as a talloc parent + * exactly as you would use any other ::TALLOC_CTX. The difference is that + * when you talloc a child of this pool, no malloc(3) is done. Instead, talloc + * just increments a pointer inside the talloc_pool. This also works + * recursively. If you use the child of the talloc pool as a parent for + * grand-children, their memory is also taken from the talloc pool. + * + * If there is not enough memory in the pool to allocate the new child, + * it will create a new talloc chunk as if the parent was a normal talloc + * context. + * + * If you talloc_free() children of a talloc pool, the memory is not given + * back to the system. Instead, free(3) is only called if the talloc_pool() + * itself is released with talloc_free(). + * + * The downside of a talloc pool is that if you talloc_move() a child of a + * talloc pool to a talloc parent outside the pool, the whole pool memory is + * not free(3)'ed until that moved chunk is also talloc_free()ed. + * + * @param[in] context The talloc context to hang the result off. + * + * @param[in] size Size of the talloc pool. + * + * @return The allocated talloc pool, NULL on error. + */ +_PUBLIC_ void *talloc_pool(const void *context, size_t size); + +#ifdef DOXYGEN +/** + * @brief Allocate a talloc object as/with an additional pool.
+ * + * This is like talloc_pool(), but it's more flexible + * and allows an object to be a pool for its children. + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] type The type that we want to allocate. + * + * @param[in] num_subobjects The expected number of subobjects, which will + * be allocated within the pool. This allocates + * space for talloc_chunk headers. + * + * @param[in] total_subobjects_size The size that all subobjects can use in total. + * + * @return The allocated talloc object, NULL on error. + */ +_PUBLIC_ void *talloc_pooled_object(const void *ctx, #type, + unsigned num_subobjects, + size_t total_subobjects_size); +#else +#define talloc_pooled_object(_ctx, _type, \ + _num_subobjects, \ + _total_subobjects_size) \ + (_type *)_talloc_pooled_object((_ctx), sizeof(_type), #_type, \ + (_num_subobjects), \ + (_total_subobjects_size)) +_PUBLIC_ void *_talloc_pooled_object(const void *ctx, + size_t type_size, + const char *type_name, + unsigned num_subobjects, + size_t total_subobjects_size); +#endif + +/** + * @brief Free a talloc chunk and NULL out the pointer. + * + * TALLOC_FREE() frees a pointer and sets it to NULL. Use this if you want + * immediate feedback (i.e. crash) if you use a pointer after having freed + * it. + * + * @param[in] ctx The chunk to be freed. + */ +#define TALLOC_FREE(ctx) do { if (ctx != NULL) { talloc_free(ctx); ctx=NULL; } } while(0) + +/* @} ******************************************************************/ + +/** + * \defgroup talloc_ref The talloc reference function. + * @ingroup talloc + * + * This module contains the definitions around talloc references + * + * @{ + */ + +/** + * @brief Increase the reference count of a talloc chunk. + * + * The talloc_increase_ref_count(ptr) function is exactly equivalent to: + * + * @code + * talloc_reference(NULL, ptr); + * @endcode + * + * You can use either syntax, depending on which you think is clearer in + * your code. + * + * @param[in] ptr The pointer to increase the reference count. + * + * @return 0 on success, -1 on error. + */ +_PUBLIC_ int talloc_increase_ref_count(const void *ptr); + +/** + * @brief Get the number of references to a talloc chunk. + * + * @param[in] ptr The pointer to retrieve the reference count from. + * + * @return The number of references. + */ +_PUBLIC_ size_t talloc_reference_count(const void *ptr); + +#ifdef DOXYGEN +/** + * @brief Create an additional talloc parent to a pointer. + * + * The talloc_reference() function makes "context" an additional parent of + * ptr. Each additional reference consumes around 48 bytes of memory on Intel + * x86 platforms. + * + * If ptr is NULL, then the function is a no-op, and simply returns NULL. + * + * After creating a reference you can free it in one of the following ways: + * + * - you can talloc_free() any parent of the original pointer. That + * will reduce the number of parents of this pointer by 1, and will + * cause this pointer to be freed if it runs out of parents. + * + * - you can talloc_free() the pointer itself if it has at most one + * parent. This behaviour has been changed since the release of version + * 2.0. Further information can be found in the description of "talloc_free". + * + * For more control on which parent to remove, see talloc_unlink(). + * @param[in] ctx The additional parent. + * + * @param[in] ptr The pointer you want to create an additional parent for. + * + * @return The original pointer 'ptr', NULL if talloc ran out of + * memory in creating the reference.
+ * + * @warning You should try to avoid using this interface. It turns a beautiful + * talloc-tree into a graph. It is often really hard to debug if you + * screw something up by accident. + * + * Example: + * @code + * unsigned int *a, *b, *c; + * a = talloc(NULL, unsigned int); + * b = talloc(NULL, unsigned int); + * c = talloc(a, unsigned int); + * // b also serves as a parent of c. + * talloc_reference(b, c); + * @endcode + * + * @see talloc_unlink() + */ +_PUBLIC_ void *talloc_reference(const void *ctx, const void *ptr); +#else +#define talloc_reference(ctx, ptr) (_TALLOC_TYPEOF(ptr))_talloc_reference_loc((ctx),(ptr), __location__) +_PUBLIC_ void *_talloc_reference_loc(const void *context, const void *ptr, const char *location); +#endif + +/** + * @brief Remove a specific parent from a talloc chunk. + * + * The function removes a specific parent from ptr. The context passed must + * either be a context used in talloc_reference() with this pointer, or must be + * a direct parent of ptr. + * + * You can just use talloc_free() instead of talloc_unlink() if there + * is at most one parent. This behaviour has been changed since the + * release of version 2.0. Further information can be found in the description + * of "talloc_free". + * + * @param[in] context The talloc parent to remove. + * + * @param[in] ptr The talloc ptr you want to remove the parent from. + * + * @return 0 on success, -1 on error. + * + * @note If the parent has already been removed using talloc_free() then + * this function will fail and will return -1. Likewise, if ptr is NULL, + * then the function will make no modifications and return -1. + * + * @warning You should try to avoid using this interface. It turns a beautiful + * talloc-tree into a graph. It is often really hard to debug if you + * screw something up by accident. + * + * Example: + * @code + * unsigned int *a, *b, *c; + * a = talloc(NULL, unsigned int); + * b = talloc(NULL, unsigned int); + * c = talloc(a, unsigned int); + * // b also serves as a parent of c. + * talloc_reference(b, c); + * talloc_unlink(b, c); + * @endcode + */ +_PUBLIC_ int talloc_unlink(const void *context, void *ptr); + +/** + * @brief Provide a talloc context that is freed at program exit. + * + * This is a handy utility function that returns a talloc context + * which will be automatically freed on program exit. This can be used + * to reduce the noise in memory leak reports. + * + * Never use this in code that might be used in objects loaded with + * dlopen and unloaded with dlclose. talloc_autofree_context() + * internally uses atexit(3). Some platforms, like modern Linux, handle + * this fine, but FreeBSD, for example, does not deal well with dlopen() + * and atexit() used simultaneously: dlclose() does not clean up the + * list of atexit handlers, so when the program exits, the code that + * was registered from within talloc_autofree_context() is gone and the + * program crashes at exit. + * + * @return A talloc context, NULL on error. + */ +_PUBLIC_ void *talloc_autofree_context(void) _DEPRECATED_; + +/** + * @brief Show the parentage of a context. + * + * @param[in] context The talloc context to look at. + * + * @param[in] file The output to use, a file, stdout or stderr. + */ +_PUBLIC_ void talloc_show_parents(const void *context, FILE *file); + +/** + * @brief Check if a context is parent of a talloc chunk. + * + * This checks if context is referenced in the talloc hierarchy above ptr. + * + * @param[in] context The assumed talloc context. + * + * @param[in] ptr The talloc chunk to check.
+ * + * @return Return 1 if this is the case, 0 if not. + */ +_PUBLIC_ int talloc_is_parent(const void *context, const void *ptr); + +/** + * @brief Change the parent context of a talloc pointer. + * + * The function changes the parent context of a talloc pointer. It is typically + * used when the context that the pointer is currently a child of is going to be + * freed and you wish to keep the memory for a longer time. + * + * The difference between talloc_reparent() and talloc_steal() is that + * talloc_reparent() can specify which parent you wish to change. This is + * useful when a pointer has multiple parents via references. + * + * @param[in] old_parent The current parent context to remove. + * @param[in] new_parent The new parent context. + * @param[in] ptr The talloc chunk to reparent. + * + * @return Return the pointer you passed. It does not have any + * failure modes. + */ +_PUBLIC_ void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr); + +/* @} ******************************************************************/ + +/** + * @defgroup talloc_array The talloc array functions + * @ingroup talloc + * + * Talloc contains some handy helpers for handling arrays conveniently. + * + * @{ + */ + +#ifdef DOXYGEN +/** + * @brief Allocate an array. + * + * The macro is equivalent to: + * + * @code + * (type *)talloc_size(ctx, sizeof(type) * count); + * @endcode + * + * except that it provides integer overflow protection for the multiply, + * returning NULL if the multiply overflows. + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] type The type that we want to allocate. + * + * @param[in] count The number of 'type' elements you want to allocate. + * + * @return The allocated result, properly cast to 'type *', NULL on + * error. + * + * Example: + * @code + * unsigned int *a, *b; + * a = talloc_zero(NULL, unsigned int); + * b = talloc_array(a, unsigned int, 100); + * @endcode + * + * @see talloc() + * @see talloc_zero_array() + */ +_PUBLIC_ void *talloc_array(const void *ctx, #type, unsigned count); +#else +#define talloc_array(ctx, type, count) (type *)_talloc_array(ctx, sizeof(type), count, #type) +_PUBLIC_ void *_talloc_array(const void *ctx, size_t el_size, unsigned count, const char *name); +#endif + +#ifdef DOXYGEN +/** + * @brief Allocate an array. + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] size The size of an array element. + * + * @param[in] count The number of elements you want to allocate. + * + * @return The allocated result, NULL on error. + */ +_PUBLIC_ void *talloc_array_size(const void *ctx, size_t size, unsigned count); +#else +#define talloc_array_size(ctx, size, count) _talloc_array(ctx, size, count, __location__) +#endif + +#ifdef DOXYGEN +/** + * @brief Allocate an array into a typed pointer. + * + * The macro should be used when you have a pointer to an array and want to + * allocate memory of an array to point at with this pointer. When compiling + * with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_array_size() + * and talloc_get_name() will return the current location in the source file + * and not the type. + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] ptr The pointer you want to assign the result to. + * + * @param[in] count The number of elements you want to allocate. + * + * @return The allocated memory chunk, properly casted. NULL on + * error.
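+ * + * Example (a minimal sketch): + * @code + * double *a = talloc_array_ptrtype(NULL, a, 100); + * @endcode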
+ */ +void *talloc_array_ptrtype(const void *ctx, const void *ptr, unsigned count); +#else +#define talloc_array_ptrtype(ctx, ptr, count) (_TALLOC_TYPEOF(ptr))talloc_array_size(ctx, sizeof(*(ptr)), count) +#endif + +#ifdef DOXYGEN +/** + * @brief Get the number of elements in a talloc'ed array. + * + * A talloc chunk carries its own size, so for talloc'ed arrays it is not + * necessary to store the number of elements explicitly. + * + * @param[in] ctx The allocated array. + * + * @return The number of elements in ctx. + */ +size_t talloc_array_length(const void *ctx); +#else +#define talloc_array_length(ctx) (talloc_get_size(ctx)/sizeof(*ctx)) +#endif + +#ifdef DOXYGEN +/** + * @brief Allocate a zero-initialized array + * + * @param[in] ctx The talloc context to hang the result off. + * + * @param[in] type The type that we want to allocate. + * + * @param[in] count The number of "type" elements you want to allocate. + * + * @return The allocated result casted to "type *", NULL on error. + * + * The talloc_zero_array() macro is equivalent to: + * + * @code + * ptr = talloc_array(ctx, type, count); + * if (ptr) memset(ptr, 0, sizeof(type) * count); + * @endcode + */ +void *talloc_zero_array(const void *ctx, #type, unsigned count); +#else +#define talloc_zero_array(ctx, type, count) (type *)_talloc_zero_array(ctx, sizeof(type), count, #type) +_PUBLIC_ void *_talloc_zero_array(const void *ctx, + size_t el_size, + unsigned count, + const char *name); +#endif + +#ifdef DOXYGEN +/** + * @brief Change the size of a talloc array. + * + * The macro changes the size of a talloc pointer. The 'count' argument is the + * number of elements of type 'type' that you want the resulting pointer to + * hold. + * + * talloc_realloc() has the following equivalences: + * + * @code + * talloc_realloc(ctx, NULL, type, 1) ==> talloc(ctx, type); + * talloc_realloc(ctx, NULL, type, N) ==> talloc_array(ctx, type, N); + * talloc_realloc(ctx, ptr, type, 0) ==> talloc_free(ptr); + * @endcode + * + * The "context" argument is only used if "ptr" is NULL, otherwise it is + * ignored. + * + * @param[in] ctx The parent context used if ptr is NULL. + * + * @param[in] ptr The chunk to be resized. + * + * @param[in] type The type of the array element inside ptr. + * + * @param[in] count The intended number of array elements. + * + * @return The new array, NULL on error. The call will fail either + * due to a lack of memory, or because the pointer has more + * than one parent (see talloc_reference()). + */ +_PUBLIC_ void *talloc_realloc(const void *ctx, void *ptr, #type, size_t count); +#else +#define talloc_realloc(ctx, p, type, count) (type *)_talloc_realloc_array(ctx, p, sizeof(type), count, #type) +_PUBLIC_ void *_talloc_realloc_array(const void *ctx, void *ptr, size_t el_size, unsigned count, const char *name); +#endif + +#ifdef DOXYGEN +/** + * @brief Untyped realloc to change the size of a talloc array. + * + * The macro is useful when the type is not known so the typesafe + * talloc_realloc() cannot be used. + * + * @param[in] ctx The parent context used if 'ptr' is NULL. + * + * @param[in] ptr The chunk to be resized. + * + * @param[in] size The new chunk size. + * + * @return The new array, NULL on error. 
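+ * + * Example (a minimal sketch): + * @code + * void *buf = talloc_size(NULL, 100); + * buf = talloc_realloc_size(NULL, buf, 200); // grow to 200 bytes + * @endcode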
+ */
+void *talloc_realloc_size(const void *ctx, void *ptr, size_t size);
+#else
+#define talloc_realloc_size(ctx, ptr, size) _talloc_realloc(ctx, ptr, size, __location__)
+_PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, const char *name);
+#endif
+
+/**
+ * @brief Provide a function version of talloc_realloc_size().
+ *
+ * This is a non-macro version of talloc_realloc(), which is useful as
+ * libraries sometimes want a realloc function pointer. A realloc()
+ * implementation encapsulates the functionality of malloc(), free() and
+ * realloc() in one call, which is why it is useful to be able to pass around
+ * a single function pointer.
+ *
+ * @param[in]  context  The parent context used if ptr is NULL.
+ *
+ * @param[in]  ptr      The chunk to be resized.
+ *
+ * @param[in]  size     The new chunk size.
+ *
+ * @return              The new chunk, NULL on error.
+ */
+_PUBLIC_ void *talloc_realloc_fn(const void *context, void *ptr, size_t size);
+
+/* @} ******************************************************************/
+
+/**
+ * @defgroup talloc_string The talloc string functions.
+ * @ingroup talloc
+ *
+ * talloc string allocation and manipulation functions.
+ * @{
+ */
+
+/**
+ * @brief Duplicate a string into a talloc chunk.
+ *
+ * This function is equivalent to:
+ *
+ * @code
+ *      ptr = talloc_size(ctx, strlen(p)+1);
+ *      if (ptr) memcpy(ptr, p, strlen(p)+1);
+ * @endcode
+ *
+ * This function sets the name of the new pointer to the passed
+ * string. This is equivalent to:
+ *
+ * @code
+ *      talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in]  t        The talloc context to hang the result off.
+ *
+ * @param[in]  p        The string you want to duplicate.
+ *
+ * @return              The duplicated string, NULL on error.
+ */
+_PUBLIC_ char *talloc_strdup(const void *t, const char *p);
+
+/**
+ * @brief Append a string to a given string.
+ *
+ * The destination string is reallocated to take
+ * <code>strlen(s) + strlen(a) + 1</code> characters.
+ *
+ * This function sets the name of the new pointer to the new
+ * string. This is equivalent to:
+ *
+ * @code
+ *      talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * If <code>s == NULL</code> then a new context is created.
+ *
+ * @param[in]  s        The destination to append to.
+ *
+ * @param[in]  a        The string you want to append.
+ *
+ * @return              The concatenated strings, NULL on error.
+ *
+ * @see talloc_strdup()
+ * @see talloc_strdup_append_buffer()
+ */
+_PUBLIC_ char *talloc_strdup_append(char *s, const char *a);
+
+/**
+ * @brief Append a string to a given buffer.
+ *
+ * This is a more efficient version of talloc_strdup_append(). It determines
+ * the length of the destination string by the size of the talloc context.
+ *
+ * Use this very carefully as it produces a different result than
+ * talloc_strdup_append() when a zero character is in the middle of the
+ * destination string.
+ *
+ * @code
+ *      char *str_a = talloc_strdup(NULL, "hello world");
+ *      char *str_b = talloc_strdup(NULL, "hello world");
+ *      str_a[5] = str_b[5] = '\0';
+ *
+ *      char *app = talloc_strdup_append(str_a, ", hello");
+ *      char *buf = talloc_strdup_append_buffer(str_b, ", hello");
+ *
+ *      printf("%s\n", app); // hello, hello (app = "hello, hello")
+ *      printf("%s\n", buf); // hello (buf = "hello\0world, hello")
+ * @endcode
+ *
+ * If <code>s == NULL</code> then a new context is created.
+ *
+ * @param[in]  s        The destination buffer to append to.
+ *
+ * @param[in]  a        The string you want to append.
+ *
+ * @return              The concatenated strings, NULL on error.
+ *
+ * @see talloc_strdup()
+ * @see talloc_strdup_append()
+ * @see talloc_array_length()
+ */
+_PUBLIC_ char *talloc_strdup_append_buffer(char *s, const char *a);
+
+/**
+ * @brief Duplicate a length-limited string into a talloc chunk.
+ *
+ * This function is the talloc equivalent of the C library function strndup(3).
+ *
+ * This function sets the name of the new pointer to the passed string. This is
+ * equivalent to:
+ *
+ * @code
+ *      talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in]  t        The talloc context to hang the result off.
+ *
+ * @param[in]  p        The string you want to duplicate.
+ *
+ * @param[in]  n        The maximum string length to duplicate.
+ *
+ * @return              The duplicated string, NULL on error.
+ */
+_PUBLIC_ char *talloc_strndup(const void *t, const char *p, size_t n);
+
+/**
+ * @brief Append at most n characters of a string to a given string.
+ *
+ * The destination string is reallocated to take
+ * <code>strlen(s) + strnlen(a, n) + 1</code> characters.
+ *
+ * This function sets the name of the new pointer to the new
+ * string. This is equivalent to:
+ *
+ * @code
+ *      talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * If <code>s == NULL</code> then a new context is created.
+ *
+ * @param[in]  s        The destination string to append to.
+ *
+ * @param[in]  a        The source string you want to append.
+ *
+ * @param[in]  n        The number of characters you want to append from the
+ *                      string.
+ *
+ * @return              The concatenated strings, NULL on error.
+ *
+ * @see talloc_strndup()
+ * @see talloc_strndup_append_buffer()
+ */
+_PUBLIC_ char *talloc_strndup_append(char *s, const char *a, size_t n);
+
+/**
+ * @brief Append at most n characters of a string to a given buffer.
+ *
+ * This is a more efficient version of talloc_strndup_append(). It determines
+ * the length of the destination string by the size of the talloc context.
+ *
+ * Use this very carefully as it produces a different result than
+ * talloc_strndup_append() when a zero character is in the middle of the
+ * destination string.
+ *
+ * @code
+ *      char *str_a = talloc_strdup(NULL, "hello world");
+ *      char *str_b = talloc_strdup(NULL, "hello world");
+ *      str_a[5] = str_b[5] = '\0';
+ *
+ *      char *app = talloc_strndup_append(str_a, ", hello", 7);
+ *      char *buf = talloc_strndup_append_buffer(str_b, ", hello", 7);
+ *
+ *      printf("%s\n", app); // hello, hello (app = "hello, hello")
+ *      printf("%s\n", buf); // hello (buf = "hello\0world, hello")
+ * @endcode
+ *
+ * If <code>s == NULL</code> then a new context is created.
+ *
+ * @param[in]  s        The destination buffer to append to.
+ *
+ * @param[in]  a        The source string you want to append.
+ *
+ * @param[in]  n        The number of characters you want to append from the
+ *                      string.
+ *
+ * @return              The concatenated strings, NULL on error.
+ *
+ * @see talloc_strndup()
+ * @see talloc_strndup_append()
+ * @see talloc_array_length()
+ */
+_PUBLIC_ char *talloc_strndup_append_buffer(char *s, const char *a, size_t n);
+
+/**
+ * @brief Format a string given a va_list.
+ *
+ * This function is the talloc equivalent of the C library function
+ * vasprintf(3).
+ *
+ * This function sets the name of the new pointer to the new string. This is
+ * equivalent to:
+ *
+ * @code
+ *      talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in]  t        The talloc context to hang the result off.
+ *
+ * @param[in]  fmt      The format string.
+ *
+ * @param[in]  ap       The parameters used to fill fmt.
+ *
+ * @return              The formatted string, NULL on error.
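+ *
+ * Example (a minimal sketch of a custom printf-style wrapper; "my_printf"
+ * is an illustrative name, not part of the API):
+ * @code
+ *      char *my_printf(TALLOC_CTX *ctx, const char *fmt, ...)
+ *      {
+ *              va_list ap;
+ *              char *result;
+ *
+ *              va_start(ap, fmt);
+ *              result = talloc_vasprintf(ctx, fmt, ap);
+ *              va_end(ap);
+ *
+ *              return result;
+ *      }
+ * @endcode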
+ */
+_PUBLIC_ char *talloc_vasprintf(const void *t, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
+
+/**
+ * @brief Format a string given a va_list and append it to the given destination
+ *        string.
+ *
+ * @param[in]  s        The destination string to append to.
+ *
+ * @param[in]  fmt      The format string.
+ *
+ * @param[in]  ap       The parameters used to fill fmt.
+ *
+ * @return              The formatted string, NULL on error.
+ *
+ * @see talloc_vasprintf()
+ */
+_PUBLIC_ char *talloc_vasprintf_append(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
+
+/**
+ * @brief Format a string given a va_list and append it to the given destination
+ *        buffer.
+ *
+ * @param[in]  s        The destination buffer to append to.
+ *
+ * @param[in]  fmt      The format string.
+ *
+ * @param[in]  ap       The parameters used to fill fmt.
+ *
+ * @return              The formatted string, NULL on error.
+ *
+ * @see talloc_vasprintf()
+ */
+_PUBLIC_ char *talloc_vasprintf_append_buffer(char *s, const char *fmt, va_list ap) PRINTF_ATTRIBUTE(2,0);
+
+/**
+ * @brief Build up a string buffer, handling allocation failure.
+ *
+ * @param[in]  ps       Pointer to the talloc'ed string to be extended.
+ * @param[in]  fmt      The format string.
+ * @param[in]  ...      The parameters used to fill fmt.
+ *
+ * This does nothing if *ps is NULL and sets *ps to NULL if the
+ * intermediate reallocation fails. This is useful when building up a string
+ * step by step: no intermediate NULL checks are required.
+ */
+_PUBLIC_ void talloc_asprintf_addbuf(char **ps, const char *fmt, ...) \
+			PRINTF_ATTRIBUTE(2,3);
+
+/**
+ * @brief Format a string.
+ *
+ * This function is the talloc equivalent of the C library function asprintf(3).
+ *
+ * This function sets the name of the new pointer to the new string. This is
+ * equivalent to:
+ *
+ * @code
+ *      talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * @param[in]  t        The talloc context to hang the result off.
+ *
+ * @param[in]  fmt      The format string.
+ *
+ * @param[in]  ...      The parameters used to fill fmt.
+ *
+ * @return              The formatted string, NULL on error.
+ */
+_PUBLIC_ char *talloc_asprintf(const void *t, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+
+/**
+ * @brief Append a formatted string to another string.
+ *
+ * This function appends the given formatted string to the given string. Use
+ * this variant when the string in the current talloc buffer may have been
+ * truncated in length.
+ *
+ * This function sets the name of the new pointer to the new
+ * string. This is equivalent to:
+ *
+ * @code
+ *      talloc_set_name_const(ptr, ptr)
+ * @endcode
+ *
+ * If <code>s == NULL</code> then a new context is created.
+ *
+ * @param[in]  s        The string to append to.
+ *
+ * @param[in]  fmt      The format string.
+ *
+ * @param[in]  ...      The parameters used to fill fmt.
+ *
+ * @return              The formatted string, NULL on error.
+ */
+_PUBLIC_ char *talloc_asprintf_append(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+
+/**
+ * @brief Append a formatted string to another string.
+ *
+ * This is a more efficient version of talloc_asprintf_append(). It determines
+ * the length of the destination string by the size of the talloc context.
+ *
+ * Use this very carefully as it produces a different result than
+ * talloc_asprintf_append() when a zero character is in the middle of the
+ * destination string.
+ *
+ * @code
+ *      char *str_a = talloc_strdup(NULL, "hello world");
+ *      char *str_b = talloc_strdup(NULL, "hello world");
+ *      str_a[5] = str_b[5] = '\0';
+ *
+ *      char *app = talloc_asprintf_append(str_a, "%s", ", hello");
+ *      char *buf = talloc_asprintf_append_buffer(str_b, "%s", ", hello");
+ *
+ *      printf("%s\n", app); // hello, hello (app = "hello, hello")
+ *      printf("%s\n", buf); // hello (buf = "hello\0world, hello")
+ * @endcode
+ *
+ * If <code>s == NULL</code> then a new context is created.
+ *
+ * @param[in]  s        The string to append to.
+ *
+ * @param[in]  fmt      The format string.
+ *
+ * @param[in]  ...      The parameters used to fill fmt.
+ *
+ * @return              The formatted string, NULL on error.
+ *
+ * @see talloc_asprintf()
+ * @see talloc_asprintf_append()
+ */
+_PUBLIC_ char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...) PRINTF_ATTRIBUTE(2,3);
+
+/* @} ******************************************************************/
+
+/**
+ * @defgroup talloc_debug The talloc debugging support functions
+ * @ingroup talloc
+ *
+ * To aid memory debugging, talloc contains routines to inspect the currently
+ * allocated memory hierarchy.
+ *
+ * @{
+ */
+
+/**
+ * @brief Walk a complete talloc hierarchy.
+ *
+ * This provides a more flexible report than talloc_report(). It
+ * will recursively call the callback for the entire tree of memory
+ * referenced by the pointer. References in the tree are passed with
+ * is_ref = 1 and the pointer that is referenced.
+ *
+ * You can pass NULL for the pointer, in which case a report is
+ * printed for the top level memory context, but only if
+ * talloc_enable_leak_report() or talloc_enable_leak_report_full()
+ * has been called.
+ *
+ * The recursion is stopped when depth >= max_depth.
+ * max_depth = -1 means only stop at leaf nodes.
+ *
+ * @param[in]  ptr      The talloc chunk.
+ *
+ * @param[in]  depth    Internal parameter to control recursion. Call with 0.
+ *
+ * @param[in]  max_depth    Maximum recursion level.
+ *
+ * @param[in]  callback     Function to be called on every chunk.
+ *
+ * @param[in]  private_data Private pointer passed to callback.
+ */
+_PUBLIC_ void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
+			    void (*callback)(const void *ptr,
+					     int depth, int max_depth,
+					     int is_ref,
+					     void *private_data),
+			    void *private_data);
+
+/**
+ * @brief Print a talloc hierarchy.
+ *
+ * This provides a more flexible report than talloc_report(). It
+ * will let you specify the depth and max_depth.
+ *
+ * @param[in]  ptr      The talloc chunk.
+ *
+ * @param[in]  depth    Internal parameter to control recursion. Call with 0.
+ *
+ * @param[in]  max_depth    Maximum recursion level.
+ *
+ * @param[in]  f        The file handle to print to.
+ */
+_PUBLIC_ void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f);
+
+/**
+ * @brief Print a summary report of all memory used by ptr.
+ *
+ * This provides a more detailed report than talloc_report(). It will
+ * recursively print the entire tree of memory referenced by the
+ * pointer. References in the tree are shown by giving the name of the
+ * pointer that is referenced.
+ *
+ * You can pass NULL for the pointer, in which case a report is printed
+ * for the top level memory context, but only if
+ * talloc_enable_leak_report() or talloc_enable_leak_report_full() has
+ * been called.
+ *
+ * @param[in]  ptr      The talloc chunk.
+ *
+ * @param[in]  f        The file handle to print to.
+ *
+ * Example:
+ * @code
+ *      unsigned int *a, *b;
+ *      a = talloc(NULL, unsigned int);
+ *      b = talloc(a, unsigned int);
+ *      fprintf(stderr, "Dumping memory tree for a:\n");
+ *      talloc_report_full(a, stderr);
+ * @endcode
+ *
+ * @see talloc_report()
+ */
+_PUBLIC_ void talloc_report_full(const void *ptr, FILE *f);
+
+/**
+ * @brief Print a summary report of all memory used by ptr.
+ *
+ * This function prints a summary report of all memory used by ptr. One line
+ * of report is printed for each immediate child of ptr, showing the total
+ * memory and number of blocks used by that child.
+ *
+ * You can pass NULL for the pointer, in which case a report is printed
+ * for the top level memory context, but only if talloc_enable_leak_report()
+ * or talloc_enable_leak_report_full() has been called.
+ *
+ * @param[in]  ptr      The talloc chunk.
+ *
+ * @param[in]  f        The file handle to print to.
+ *
+ * Example:
+ * @code
+ *      unsigned int *a, *b;
+ *      a = talloc(NULL, unsigned int);
+ *      b = talloc(a, unsigned int);
+ *      fprintf(stderr, "Summary of memory tree for a:\n");
+ *      talloc_report(a, stderr);
+ * @endcode
+ *
+ * @see talloc_report_full()
+ */
+_PUBLIC_ void talloc_report(const void *ptr, FILE *f);
+
+/**
+ * @brief Enable tracking the use of NULL memory contexts.
+ *
+ * This enables tracking of the NULL memory context without enabling leak
+ * reporting on exit. It is useful when you want to do your own leak
+ * reporting call via talloc_report_null_full().
+ */
+_PUBLIC_ void talloc_enable_null_tracking(void);
+
+/**
+ * @brief Enable tracking the use of NULL memory contexts.
+ *
+ * This enables tracking of the NULL memory context without enabling leak
+ * reporting on exit. It is useful when you want to do your own leak
+ * reporting call via talloc_report_null_full(). Unlike
+ * talloc_enable_null_tracking(), an existing autofree context is not
+ * reparented under the NULL context.
+ */
+_PUBLIC_ void talloc_enable_null_tracking_no_autofree(void);
+
+/**
+ * @brief Disable tracking of the NULL memory context.
+ *
+ * This disables tracking of the NULL memory context.
+ */
+_PUBLIC_ void talloc_disable_null_tracking(void);
+
+/**
+ * @brief Enable leak report when a program exits.
+ *
+ * This enables calling of talloc_report(NULL, stderr) when the program
+ * exits. In Samba4 this is enabled by using the --leak-report command
+ * line option.
+ *
+ * For it to be useful, this function must be called before any other
+ * talloc function as it establishes a "null context" that acts as the
+ * top of the tree. If you don't call this function first then passing
+ * NULL to talloc_report() or talloc_report_full() won't give you the
+ * full tree printout.
+ *
+ * Here is a typical talloc report:
+ *
+ * @code
+ * talloc report on 'null_context' (total 267 bytes in 15 blocks)
+ *         libcli/auth/spnego_parse.c:55  contains     31 bytes in   2 blocks
+ *         libcli/auth/spnego_parse.c:55  contains     31 bytes in   2 blocks
+ *         iconv(UTF8,CP850)              contains     42 bytes in   2 blocks
+ *         libcli/auth/spnego_parse.c:55  contains     31 bytes in   2 blocks
+ *         iconv(CP850,UTF8)              contains     42 bytes in   2 blocks
+ *         iconv(UTF8,UTF-16LE)           contains     45 bytes in   2 blocks
+ *         iconv(UTF-16LE,UTF8)           contains     45 bytes in   2 blocks
+ * @endcode
+ */
+_PUBLIC_ void talloc_enable_leak_report(void);
+
+/**
+ * @brief Enable full leak report when a program exits.
+ *
+ * This enables calling of talloc_report_full(NULL, stderr) when the
+ * program exits. In Samba4 this is enabled by using the
+ * --leak-report-full command line option.
+ *
+ * For it to be useful, this function must be called before any other
+ * talloc function as it establishes a "null context" that acts as the
+ * top of the tree. If you don't call this function first then passing
+ * NULL to talloc_report() or talloc_report_full() won't give you the
+ * full tree printout.
+ *
+ * Here is a typical full report:
+ *
+ * @code
+ * full talloc report on 'root' (total 18 bytes in 8 blocks)
+ *         p1          contains     18 bytes in   7 blocks (ref 0)
+ *         r1          contains     13 bytes in   2 blocks (ref 0)
+ *         reference to: p2
+ *         p2          contains      1 bytes in   1 blocks (ref 1)
+ *         x3          contains      1 bytes in   1 blocks (ref 0)
+ *         x2          contains      1 bytes in   1 blocks (ref 0)
+ *         x1          contains      1 bytes in   1 blocks (ref 0)
+ * @endcode
+ */
+_PUBLIC_ void talloc_enable_leak_report_full(void);
+
+/**
+ * @brief Set a custom "abort" function that is called on serious error.
+ *
+ * The default "abort" function is <code>abort()</code>.
+ *
+ * The "abort" function is called when:
+ *
+ * <ul>
+ *  <li>talloc_get_type_abort() fails</li>
+ *  <li>the provided pointer is not a valid talloc context</li>
+ *  <li>the context metadata is invalid</li>
+ *  <li>access after free is detected</li>
+ * </ul>
+ *
+ * Example:
+ *
+ * @code
+ * void my_abort(const char *reason)
+ * {
+ *      fprintf(stderr, "talloc abort: %s\n", reason);
+ *      abort();
+ * }
+ *
+ * talloc_set_abort_fn(my_abort);
+ * @endcode
+ *
+ * @param[in]  abort_fn The new "abort" function.
+ *
+ * @see talloc_set_log_fn()
+ * @see talloc_get_type()
+ */
+_PUBLIC_ void talloc_set_abort_fn(void (*abort_fn)(const char *reason));
+
+/**
+ * @brief Set a logging function.
+ *
+ * @param[in]  log_fn   The logging function.
+ *
+ * @see talloc_set_log_stderr()
+ * @see talloc_set_abort_fn()
+ */
+_PUBLIC_ void talloc_set_log_fn(void (*log_fn)(const char *message));
+
+/**
+ * @brief Set stderr as the output for logs.
+ *
+ * @see talloc_set_log_fn()
+ * @see talloc_set_abort_fn()
+ */
+_PUBLIC_ void talloc_set_log_stderr(void);
+
+/**
+ * @brief Set a max memory limit for the current context hierarchy.
+ *
+ * This affects all children of this context and constrains any
+ * allocation in the hierarchy to never exceed the limit set.
+ * The limit can be removed by setting 0 (unlimited) as the
+ * max_size by calling the function again on the same context.
+ * Memory limits can also be nested, meaning a child can have
+ * a stricter memory limit than a parent.
+ * Memory limits are enforced only at memory allocation time.
+ * Stealing a context into a 'limited' hierarchy properly
+ * updates memory usage but does *not* cause failure if the
+ * move causes the new parent to exceed its limits. However,
+ * any further allocation on that hierarchy will then fail.
+ *
+ * @warning talloc memlimit functionality is deprecated. Please
+ *          consider using cgroup memory limits instead.
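+ *
+ * Example (a brief illustrative sketch; the 1 MiB limit is arbitrary):
+ * @code
+ *      TALLOC_CTX *pool = talloc_new(NULL);
+ *      talloc_set_memlimit(pool, 1024 * 1024);
+ *      // further allocations under "pool" fail once the limit is exceeded
+ * @endcode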
+ * + * @param[in] ctx The talloc context to set the limit on + * @param[in] max_size The (new) max_size + */ +_PUBLIC_ int talloc_set_memlimit(const void *ctx, size_t max_size) _DEPRECATED_; + +/* @} ******************************************************************/ + +#if TALLOC_DEPRECATED +#define talloc_zero_p(ctx, type) talloc_zero(ctx, type) +#define talloc_p(ctx, type) talloc(ctx, type) +#define talloc_array_p(ctx, type, count) talloc_array(ctx, type, count) +#define talloc_realloc_p(ctx, p, type, count) talloc_realloc(ctx, p, type, count) +#define talloc_destroy(ctx) talloc_free(ctx) +#define talloc_append_string(c, s, a) (s?talloc_strdup_append(s,a):talloc_strdup(c, a)) +#endif + +#ifndef TALLOC_MAX_DEPTH +#define TALLOC_MAX_DEPTH 10000 +#endif + +#ifdef __cplusplus +} /* end of extern "C" */ +#endif + +#endif diff --git a/talloc.pc.in b/talloc.pc.in new file mode 100644 index 0000000..437281a --- /dev/null +++ b/talloc.pc.in @@ -0,0 +1,11 @@ +prefix=@prefix@ +exec_prefix=@exec_prefix@ +libdir=@libdir@ +includedir=@includedir@ + +Name: talloc +Description: A hierarchical pool based memory system with destructors +Version: @TALLOC_VERSION@ +Libs: @LIB_RPATH@ -L${libdir} -ltalloc +Cflags: -I${includedir} +URL: http://talloc.samba.org/ diff --git a/talloc_guide.txt b/talloc_guide.txt new file mode 100644 index 0000000..dedda6c --- /dev/null +++ b/talloc_guide.txt @@ -0,0 +1,768 @@ +Using talloc in Samba4 +====================== + +.. contents:: + +Andrew Tridgell +August 2009 + +The most current version of this document is available at + http://samba.org/ftp/unpacked/talloc/talloc_guide.txt + +If you are used to the "old" talloc from Samba3 before 3.0.20 then please read +this carefully, as talloc has changed a lot. With 3.0.20 (or 3.0.14?) the +Samba4 talloc has been ported back to Samba3, so this guide applies to both. + +The new talloc is a hierarchical, reference counted memory pool system +with destructors. Quite a mouthful really, but not too bad once you +get used to it. + +Perhaps the biggest change from Samba3 is that there is no distinction +between a "talloc context" and a "talloc pointer". Any pointer +returned from talloc() is itself a valid talloc context. This means +you can do this:: + + struct foo *X = talloc(mem_ctx, struct foo); + X->name = talloc_strdup(X, "foo"); + +and the pointer X->name would be a "child" of the talloc context "X" +which is itself a child of "mem_ctx". So if you do talloc_free(mem_ctx) +then it is all destroyed, whereas if you do talloc_free(X) then just X +and X->name are destroyed, and if you do talloc_free(X->name) then +just the name element of X is destroyed. + +If you think about this, then what this effectively gives you is an +n-ary tree, where you can free any part of the tree with +talloc_free(). + +If you find this confusing, then I suggest you run the testsuite to +watch talloc in action. You may also like to add your own tests to +testsuite.c to clarify how some particular situation is handled. + + +Performance +----------- + +All the additional features of talloc() over malloc() do come at a +price. We have a simple performance test in Samba4 that measures +talloc() versus malloc() performance, and it seems that talloc() is +about 4% slower than malloc() on my x86 Debian Linux box. For Samba, +the great reduction in code complexity that we get by using talloc +makes this worthwhile, especially as the total overhead of +talloc/malloc in Samba is already quite small. 
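+
+As a concrete illustration of the hierarchy described above (a short
+sketch, reusing the "struct foo" from the earlier example)::
+
+  TALLOC_CTX *mem_ctx = talloc_new(NULL);
+  struct foo *X = talloc(mem_ctx, struct foo);
+  X->name = talloc_strdup(X, "foo");
+
+  talloc_free(mem_ctx);   /* frees mem_ctx, X and X->name in one call */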
+
+
+talloc API
+----------
+
+The following is a complete guide to the talloc API. Read it all at
+least twice.
+
+Multi-threading
+---------------
+
+talloc itself does not deal with threads. It is thread-safe (assuming
+the underlying "malloc" is), as long as each thread uses different
+memory contexts.
+If two threads use the same context then they need to synchronize in
+order to be safe. In particular:
+- when using talloc_enable_leak_report(), passing NULL directly as a
+parent context implicitly refers to a hidden "null context" global
+variable, so this should not be used in a multi-threaded environment
+without proper synchronization. In threaded code turn off null tracking
+using talloc_disable_null_tracking();
+- the context returned by talloc_autofree_context() is also global so
+it shouldn't be used by several threads simultaneously without
+synchronization.
+
+talloc and shared objects
+-------------------------
+
+talloc can be used in shared objects. Special care needs to be taken
+to never use talloc_autofree_context() in code that might be loaded
+with dlopen() and unloaded with dlclose(), as talloc_autofree_context()
+internally uses atexit(3). Some platforms like modern Linux handle
+this fine, but for example FreeBSD does not deal well with dlopen()
+and atexit() used simultaneously: dlclose() does not clean up the list
+of atexit-handlers, so when the program exits the code that was
+registered from within talloc_autofree_context() is gone and the
+program crashes at exit.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+(type *)talloc(const void *context, type);
+
+The talloc() macro is the core of the talloc library. It takes a
+memory context and a type, and returns a pointer to a new area of
+memory of the given type.
+
+The returned pointer is itself a talloc context, so you can use it as
+the context argument to more calls to talloc if you wish.
+
+The returned pointer is a "child" of the supplied context. This means
+that if you talloc_free() the context then the new child disappears as
+well. Alternatively you can free just the child.
+
+The context argument to talloc() can be NULL, in which case a new top
+level context is created.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_size(const void *context, size_t size);
+
+The function talloc_size() should be used when you don't have a
+convenient type to pass to talloc(). Unlike talloc(), it is not type
+safe (as it returns a void *), so you are on your own for type checking.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+(typeof(ptr)) talloc_ptrtype(const void *ctx, ptr);
+
+The talloc_ptrtype() macro should be used when you have a pointer and
+want to allocate memory to point at with this pointer. When compiling
+with gcc >= 3 it is typesafe. Note this is a wrapper of talloc_size()
+and talloc_get_name() will return the current location in the source
+file, and not the type.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+int talloc_free(void *ptr);
+
+The talloc_free() function frees a piece of talloc memory, and all its
+children. You can call talloc_free() on any pointer returned by
+talloc().
+
+The return value of talloc_free() indicates success or failure, with 0
+returned for success and -1 for failure. A possible failure condition
+is if the pointer had a destructor attached to it and the destructor
+returned -1. See talloc_set_destructor() for details on
+destructors.
Likewise, if "ptr" is NULL, then the function will make
+no modifications and return -1.
+
+From version 2.0 and onwards, as a special case, talloc_free() is
+refused on pointers that have more than one parent associated, as talloc
+would have no way of knowing which parent should be removed. This is
+different from older versions, which always destroyed the reference to
+the most recently established parent. Hence, to free a pointer that has
+more than one parent, please use talloc_unlink().
+
+To help you find problems in your code caused by this behaviour, if
+you do try to free a pointer with more than one parent then the
+talloc logging function will be called to give output like this:
+
+  ERROR: talloc_free with references at some_dir/source/foo.c:123
+     reference at some_dir/source/other.c:325
+     reference at some_dir/source/third.c:121
+
+Please see the documentation for talloc_set_log_fn() and
+talloc_set_log_stderr() for more information on talloc logging
+functions.
+
+talloc_free() operates recursively on its children.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void talloc_free_children(void *ptr);
+
+The talloc_free_children() function walks along the list of all
+children of a talloc context and talloc_free()s only the children,
+not the context itself.
+
+A NULL argument is handled as a no-op.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_reference(const void *context, const void *ptr);
+
+The talloc_reference() function makes "context" an additional parent
+of "ptr".
+
+The return value of talloc_reference() is always the original pointer
+"ptr", unless talloc ran out of memory in creating the reference in
+which case it will return NULL (each additional reference consumes
+around 48 bytes of memory on intel x86 platforms).
+
+If "ptr" is NULL, then the function is a no-op, and simply returns NULL.
+
+After creating a reference you can free it in one of the following
+ways:
+
+  - you can talloc_free() any parent of the original pointer. That
+    will reduce the number of parents of this pointer by 1, and will
+    cause this pointer to be freed if it runs out of parents.
+
+  - you can talloc_free() the pointer itself if it has at most one
+    parent. This behaviour has been changed since the release of
+    version 2.0. Further information can be found in the description
+    of "talloc_free".
+
+For more control over which parent to remove, see talloc_unlink().
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+int talloc_unlink(const void *context, void *ptr);
+
+The talloc_unlink() function removes a specific parent from ptr. The
+context passed must either be a context used in talloc_reference()
+with this pointer, or must be a direct parent of ptr.
+
+Note that if the parent has already been removed using talloc_free()
+then this function will fail and will return -1. Likewise, if "ptr"
+is NULL, then the function will make no modifications and return -1.
+
+You can just use talloc_free() instead of talloc_unlink() if there
+is at most one parent. This behaviour has been changed since the
+release of version 2.0. Further information can be found in the
+description of "talloc_free".
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void talloc_set_destructor(const void *ptr, int (*destructor)(void *));
+
+The function talloc_set_destructor() sets the "destructor" for the
+pointer "ptr".
A destructor is a function that is called when the
+memory used by a pointer is about to be released. The destructor
+receives the pointer as an argument, and should return 0 for success
+and -1 for failure.
+
+The destructor can do anything it wants to, including freeing other
+pieces of memory. A common use for destructors is to clean up
+operating system resources (such as open file descriptors) contained
+in the structure the destructor is placed on.
+
+You can only place one destructor on a pointer. If you need more than
+one destructor then you can create a zero-length child of the pointer
+and place an additional destructor on that.
+
+To remove a destructor, call talloc_set_destructor() with NULL for the
+destructor.
+
+If your destructor attempts to talloc_free() the pointer that it is
+the destructor for then talloc_free() will return -1 and the free will
+be ignored. This would be a pointless operation anyway, as the
+destructor is only called when the memory is just about to go away.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+int talloc_increase_ref_count(const void *ptr);
+
+The talloc_increase_ref_count(ptr) function is exactly equivalent to::
+
+  talloc_reference(NULL, ptr);
+
+You can use either syntax, depending on which you think is clearer in
+your code.
+
+It returns 0 on success and -1 on failure.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+size_t talloc_reference_count(const void *ptr);
+
+Return the number of references to the pointer.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void talloc_set_name(const void *ptr, const char *fmt, ...);
+
+Each talloc pointer has a "name". The name is used principally for
+debugging purposes, although it is also possible to set and get the
+name on a pointer as a way of "marking" pointers in your code.
+
+The main use for names on pointers is for "talloc reports". See
+talloc_report() and talloc_report_full() for details. Also see
+talloc_enable_leak_report() and talloc_enable_leak_report_full().
+
+The talloc_set_name() function allocates memory as a child of the
+pointer. It is logically equivalent to::
+
+  talloc_set_name_const(ptr, talloc_asprintf(ptr, fmt, ...));
+
+Note that multiple calls to talloc_set_name() will allocate more
+memory without releasing the name. All of the memory is released when
+the ptr is freed using talloc_free().
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void talloc_set_name_const(const void *ptr, const char *name);
+
+The function talloc_set_name_const() is just like talloc_set_name(),
+but it takes a string constant, and is much faster. It is extensively
+used by the "auto naming" macros, such as talloc_p().
+
+This function does not allocate any memory. It just copies the
+supplied pointer into the internal representation of the talloc
+ptr. This means you must not pass a name pointer to memory that will
+disappear before the ptr is freed with talloc_free().
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_named(const void *context, size_t size, const char *fmt, ...);
+
+The talloc_named() function creates a named talloc pointer.
It is
+equivalent to::
+
+  ptr = talloc_size(context, size);
+  talloc_set_name(ptr, fmt, ...);
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_named_const(const void *context, size_t size, const char *name);
+
+This is equivalent to::
+
+  ptr = talloc_size(context, size);
+  talloc_set_name_const(ptr, name);
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+const char *talloc_get_name(const void *ptr);
+
+This returns the current name for the given talloc pointer. See
+talloc_set_name() for details.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_init(const char *fmt, ...);
+
+This function creates a zero length named talloc context as a top
+level context. It is equivalent to::
+
+  talloc_named(NULL, 0, fmt, ...);
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_new(void *ctx);
+
+This is a utility macro that creates a new memory context hanging
+off an existing context, automatically naming it "talloc_new: __location__"
+where __location__ is the source line it is called from. It is
+particularly useful for creating a new temporary working context.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+(type *)talloc_realloc(const void *context, void *ptr, type, count);
+
+The talloc_realloc() macro changes the size of a talloc
+pointer. The "count" argument is the number of elements of type "type"
+that you want the resulting pointer to hold.
+
+talloc_realloc() has the following equivalences::
+
+  talloc_realloc(context, NULL, type, 1) ==> talloc(context, type);
+  talloc_realloc(context, NULL, type, N) ==> talloc_array(context, type, N);
+  talloc_realloc(context, ptr, type, 0)  ==> talloc_free(ptr);
+
+The "context" argument is only used if "ptr" is NULL, otherwise it is
+ignored.
+
+talloc_realloc() returns the new pointer, or NULL on failure. The call
+will fail either due to a lack of memory, or because the pointer has
+more than one parent (see talloc_reference()).
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_realloc_size(const void *context, void *ptr, size_t size);
+
+The talloc_realloc_size() function is useful when the type is not
+known so the typesafe talloc_realloc() cannot be used.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_steal(const void *new_ctx, const void *ptr);
+
+The talloc_steal() function changes the parent context of a talloc
+pointer. It is typically used when the context that the pointer is
+currently a child of is going to be freed and you wish to keep the
+memory for a longer time.
+
+The talloc_steal() function returns the pointer that you pass it. It
+does not have any failure modes.
+
+NOTE: It is possible to produce loops in the parent/child relationship
+if you are not careful with talloc_steal(). No guarantees are provided
+as to your sanity or the safety of your data if you do this.
+
+talloc_steal(new_ctx, NULL) will return NULL with no side effects.
+
+Note that if you try to call talloc_steal() on a pointer that has
+more than one parent then the result is ambiguous. Talloc will choose
+to remove the parent that is currently indicated by talloc_parent()
+and replace it with the chosen parent.
You will also get a message
+like this via the talloc logging functions:
+
+  WARNING: talloc_steal with references at some_dir/source/foo.c:123
+     reference at some_dir/source/other.c:325
+     reference at some_dir/source/third.c:121
+
+To unambiguously change the parent of a pointer please see the
+function talloc_reparent(). See the talloc_set_log_fn() documentation
+for more information on talloc logging.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_reparent(const void *old_parent, const void *new_parent, const void *ptr);
+
+The talloc_reparent() function changes the parent context of a talloc
+pointer. It is typically used when the context that the pointer is
+currently a child of is going to be freed and you wish to keep the
+memory for a longer time.
+
+The talloc_reparent() function returns the pointer that you pass it. It
+does not have any failure modes.
+
+The difference between talloc_reparent() and talloc_steal() is that
+talloc_reparent() can specify which parent you wish to change. This is
+useful when a pointer has multiple parents via references.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_parent(const void *ptr);
+
+The talloc_parent() function returns the current talloc parent. This
+is usually the pointer under which this memory was originally created,
+but it may have changed due to a talloc_steal() or talloc_reparent().
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+size_t talloc_total_size(const void *ptr);
+
+The talloc_total_size() function returns the total size in bytes used
+by this pointer and all child pointers. Mostly useful for debugging.
+
+Passing NULL is allowed, but it will only give a meaningful result if
+talloc_enable_leak_report() or talloc_enable_leak_report_full() has
+been called.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+size_t talloc_total_blocks(const void *ptr);
+
+The talloc_total_blocks() function returns the total memory block
+count used by this pointer and all child pointers. Mostly useful for
+debugging.
+
+Passing NULL is allowed, but it will only give a meaningful result if
+talloc_enable_leak_report() or talloc_enable_leak_report_full() has
+been called.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void talloc_report_depth_cb(const void *ptr, int depth, int max_depth,
+			    void (*callback)(const void *ptr,
+					     int depth, int max_depth,
+					     int is_ref,
+					     void *priv),
+			    void *priv);
+
+This provides a more flexible report than talloc_report(). It
+will recursively call the callback for the entire tree of memory
+referenced by the pointer. References in the tree are passed with
+is_ref = 1 and the pointer that is referenced.
+
+You can pass NULL for the pointer, in which case a report is
+printed for the top level memory context, but only if
+talloc_enable_leak_report() or talloc_enable_leak_report_full()
+has been called.
+
+The recursion is stopped when depth >= max_depth.
+max_depth = -1 means only stop at leaf nodes.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void talloc_report_depth_file(const void *ptr, int depth, int max_depth, FILE *f);
+
+This provides a more flexible report than talloc_report(). It
+will let you specify the depth and max_depth.
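+
+For example, a minimal callback sketch for talloc_report_depth_cb()
+(the function name, output format and "mem_ctx" are illustrative, not
+part of the API)::
+
+  static void print_chunk(const void *ptr, int depth, int max_depth,
+                          int is_ref, void *priv)
+  {
+          FILE *f = (FILE *)priv;
+          fprintf(f, "%*s%s%s: %zu bytes\n", depth * 2, "",
+                  is_ref ? "reference to " : "",
+                  talloc_get_name(ptr), talloc_get_size(ptr));
+  }
+
+  talloc_report_depth_cb(mem_ctx, 0, -1, print_chunk, stderr);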
+ + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void talloc_report(const void *ptr, FILE *f); + +The talloc_report() function prints a summary report of all memory +used by ptr. One line of report is printed for each immediate child of +ptr, showing the total memory and number of blocks used by that child. + +You can pass NULL for the pointer, in which case a report is printed +for the top level memory context, but only if +talloc_enable_leak_report() or talloc_enable_leak_report_full() has +been called. + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void talloc_report_full(const void *ptr, FILE *f); + +This provides a more detailed report than talloc_report(). It will +recursively print the entire tree of memory referenced by the +pointer. References in the tree are shown by giving the name of the +pointer that is referenced. + +You can pass NULL for the pointer, in which case a report is printed +for the top level memory context, but only if +talloc_enable_leak_report() or talloc_enable_leak_report_full() has +been called. + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void talloc_enable_leak_report(void); + +This enables calling of talloc_report(NULL, stderr) when the program +exits. In Samba4 this is enabled by using the --leak-report command +line option. + +For it to be useful, this function must be called before any other +talloc function as it establishes a "null context" that acts as the +top of the tree. If you don't call this function first then passing +NULL to talloc_report() or talloc_report_full() won't give you the +full tree printout. + +Here is a typical talloc report: + +talloc report on 'null_context' (total 267 bytes in 15 blocks) + libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks + libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks + iconv(UTF8,CP850) contains 42 bytes in 2 blocks + libcli/auth/spnego_parse.c:55 contains 31 bytes in 2 blocks + iconv(CP850,UTF8) contains 42 bytes in 2 blocks + iconv(UTF8,UTF-16LE) contains 45 bytes in 2 blocks + iconv(UTF-16LE,UTF8) contains 45 bytes in 2 blocks + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void talloc_enable_leak_report_full(void); + +This enables calling of talloc_report_full(NULL, stderr) when the +program exits. In Samba4 this is enabled by using the +--leak-report-full command line option. + +For it to be useful, this function must be called before any other +talloc function as it establishes a "null context" that acts as the +top of the tree. If you don't call this function first then passing +NULL to talloc_report() or talloc_report_full() won't give you the +full tree printout. + +Here is a typical full report: + +full talloc report on 'root' (total 18 bytes in 8 blocks) + p1 contains 18 bytes in 7 blocks (ref 0) + r1 contains 13 bytes in 2 blocks (ref 0) + reference to: p2 + p2 contains 1 bytes in 1 blocks (ref 1) + x3 contains 1 bytes in 1 blocks (ref 0) + x2 contains 1 bytes in 1 blocks (ref 0) + x1 contains 1 bytes in 1 blocks (ref 0) + + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void talloc_enable_null_tracking(void); + +This enables tracking of the NULL memory context without enabling leak +reporting on exit. 
Useful when you want to do your own leak
+reporting call via talloc_report_null_full().
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void talloc_disable_null_tracking(void);
+
+This disables tracking of the NULL memory context.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+(type *)talloc_zero(const void *ctx, type);
+
+The talloc_zero() macro is equivalent to::
+
+  ptr = talloc(ctx, type);
+  if (ptr) memset(ptr, 0, sizeof(type));
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_zero_size(const void *ctx, size_t size)
+
+The talloc_zero_size() function is useful when you don't have a known
+type.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_memdup(const void *ctx, const void *p, size_t size);
+
+The talloc_memdup() function is equivalent to::
+
+  ptr = talloc_size(ctx, size);
+  if (ptr) memcpy(ptr, p, size);
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+char *talloc_strdup(const void *ctx, const char *p);
+
+The talloc_strdup() function is equivalent to::
+
+  ptr = talloc_size(ctx, strlen(p)+1);
+  if (ptr) memcpy(ptr, p, strlen(p)+1);
+
+This function sets the name of the new pointer to the passed
+string. This is equivalent to::
+
+  talloc_set_name_const(ptr, ptr)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+char *talloc_strndup(const void *t, const char *p, size_t n);
+
+The talloc_strndup() function is the talloc equivalent of the C
+library function strndup().
+
+This function sets the name of the new pointer to the passed
+string. This is equivalent to::
+
+  talloc_set_name_const(ptr, ptr)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+char *talloc_append_string(const void *t, char *orig, const char *append);
+
+The talloc_append_string() function appends the string "append" to the
+end of the string "orig".
+
+This function sets the name of the new pointer to the new
+string. This is equivalent to::
+
+  talloc_set_name_const(ptr, ptr)
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+char *talloc_vasprintf(const void *t, const char *fmt, va_list ap);
+
+The talloc_vasprintf() function is the talloc equivalent of the C
+library function vasprintf().
+
+This function sets the name of the new pointer to the new
+string. This is equivalent to::
+
+  talloc_set_name_const(ptr, ptr)
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+char *talloc_asprintf(const void *t, const char *fmt, ...);
+
+The talloc_asprintf() function is the talloc equivalent of the C
+library function asprintf().
+
+This function sets the name of the new pointer to the new
+string. This is equivalent to::
+
+  talloc_set_name_const(ptr, ptr)
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+char *talloc_asprintf_append(char *s, const char *fmt, ...);
+
+The talloc_asprintf_append() function appends the given formatted
+string to the given string.
+Use this variant when the string in the current talloc buffer may
+have been truncated in length.
+
+This function sets the name of the new pointer to the new
+string.
This is equivalent to::
+
+  talloc_set_name_const(ptr, ptr)
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+char *talloc_asprintf_append_buffer(char *s, const char *fmt, ...);
+
+The talloc_asprintf_append_buffer() function appends the given formatted
+string to the end of the currently allocated talloc buffer.
+Use this variant when the string in the current talloc buffer has
+not been changed.
+
+This function sets the name of the new pointer to the new
+string. This is equivalent to::
+
+  talloc_set_name_const(ptr, ptr)
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+(type *)talloc_array(const void *ctx, type, unsigned int count);
+
+The talloc_array() macro is equivalent to::
+
+  (type *)talloc_size(ctx, sizeof(type) * count);
+
+except that it provides integer overflow protection for the multiply,
+returning NULL if the multiply overflows.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_array_size(const void *ctx, size_t size, unsigned int count);
+
+The talloc_array_size() function is useful when the type is not
+known. It operates in the same way as talloc_array(), but takes a size
+instead of a type.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+(typeof(ptr)) talloc_array_ptrtype(const void *ctx, ptr, unsigned int count);
+
+The talloc_array_ptrtype() macro should be used when you have a pointer
+to an array and want to allocate memory for that array to point at. When
+compiling with gcc >= 3 it is typesafe. Note this is a wrapper of
+talloc_array_size() and talloc_get_name() will return the current
+location in the source file, and not the type.
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_realloc_fn(const void *ctx, void *ptr, size_t size);
+
+This is a non-macro version of talloc_realloc(), which is useful
+as libraries sometimes want a realloc function pointer. A realloc()
+implementation encapsulates the functionality of malloc(), free() and
+realloc() in one call, which is why it is useful to be able to pass
+around a single function pointer.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_autofree_context(void);
+
+This is a handy utility function that returns a talloc context
+which will be automatically freed on program exit. This can be used
+to reduce the noise in memory leak reports.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+void *talloc_check_name(const void *ptr, const char *name);
+
+This function checks if a pointer has the specified name. If it does
+then the pointer is returned. If it doesn't then NULL is returned.
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+(type *)talloc_get_type(const void *ptr, type);
+
+This macro allows you to do type checking on talloc pointers. It is
+particularly useful for void* private pointers. It is equivalent to
+this::
+
+  (type *)talloc_check_name(ptr, #type)
+
+
+=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-
+talloc_set_type(const void *ptr, type);
+
+This macro allows you to force the name of a pointer to be of a
+particular type. This can be used in conjunction with
+talloc_get_type() to do type checking on void* pointers.
+ +It is equivalent to this:: + + talloc_set_name_const(ptr, #type) + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +talloc_get_size(const void *ctx); + +This function lets you know the amount of memory allocated so far by +this context. It does NOT account for subcontext memory. +This can be used to calculate the size of an array. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void *talloc_find_parent_byname(const void *ctx, const char *name); + +Find a parent memory context of the current context that has the given +name. This can be very useful in complex programs where it may be +difficult to pass all information down to the level you need, but you +know the structure you want is a parent of another context. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +(type *)talloc_find_parent_bytype(ctx, type); + +Like talloc_find_parent_byname() but takes a type, making it typesafe. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void talloc_set_log_fn(void (*log_fn)(const char *message)); + +This function sets a logging function that talloc will use for +warnings and errors. By default talloc will not print any warnings or +errors. + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=- +void talloc_set_log_stderr(void) + +This sets the talloc log function to write log messages to stderr. diff --git a/talloc_testsuite.h b/talloc_testsuite.h new file mode 100644 index 0000000..acb9701 --- /dev/null +++ b/talloc_testsuite.h @@ -0,0 +1,7 @@ +#ifndef __LIB_TALLOC_TALLOC_TESTSUITE_H__ +#define __LIB_TALLOC_TALLOC_TESTSUITE_H__ + +struct torture_context; +bool torture_local_talloc(struct torture_context *tctx); + +#endif diff --git a/test_magic_differs.sh b/test_magic_differs.sh new file mode 100755 index 0000000..1b6ba2e --- /dev/null +++ b/test_magic_differs.sh @@ -0,0 +1,16 @@ +#!/bin/sh +# This test ensures that two different talloc processes do not use the same +# magic value to lessen the opportunity for transferrable attacks. + +echo "test: magic differs" + +helper=$1 +m1=$($helper) +m2=$($helper) + +if [ $m1 -eq $m2 ]; then + echo "failure: magic remained the same between executions ($m1 vs $m2)" + exit 1 +fi + +echo "success: magic differs" diff --git a/test_magic_differs_helper.c b/test_magic_differs_helper.c new file mode 100644 index 0000000..6798827 --- /dev/null +++ b/test_magic_differs_helper.c @@ -0,0 +1,12 @@ +#include <stdio.h> +#include "talloc.h" + +/* + * This program is called by a testing shell script in order to ensure that + * if the library is loaded into different processes it uses different magic + * values in order to thwart security attacks. + */ +int main(int argc, char *argv[]) { + printf("%i\n", talloc_test_get_magic()); + return 0; +} diff --git a/test_pytalloc.c b/test_pytalloc.c new file mode 100644 index 0000000..fb38585 --- /dev/null +++ b/test_pytalloc.c @@ -0,0 +1,230 @@ +/* + Samba Unix SMB/CIFS implementation. + + C utilities for the pytalloc test suite. + Provides the "_test_pytalloc" Python module. + + NOTE: Please read talloc_guide.txt for full documentation + + Copyright (C) Petr Viktorin 2015 + + ** NOTE! The following LGPL license applies to the talloc + ** library. 
This does NOT imply that all of Samba is released
+ ** under the LGPL
+
+   This library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 3 of the License, or (at your option) any later version.
+
+   This library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with this library; if not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "lib/replace/system/python.h"
+#include <talloc.h>
+#include <pytalloc.h>
+
+static PyObject *testpytalloc_new(PyTypeObject *mod,
+		PyObject *Py_UNUSED(ignored))
+{
+	char *obj = talloc_strdup(NULL, "This is a test string");
+	return pytalloc_steal(pytalloc_GetObjectType(), obj);
+}
+
+static PyObject *testpytalloc_get_object_type(PyObject *mod,
+		PyObject *Py_UNUSED(ignored))
+{
+	PyObject *type = (PyObject *)pytalloc_GetObjectType();
+	Py_INCREF(type);
+	return type;
+}
+
+static PyObject *testpytalloc_base_new(PyTypeObject *mod,
+		PyObject *Py_UNUSED(ignored))
+{
+	char *obj = talloc_strdup(NULL, "This is a test string for a BaseObject");
+	return pytalloc_steal(pytalloc_GetBaseObjectType(), obj);
+}
+
+static PyObject *testpytalloc_base_get_object_type(PyObject *mod,
+		PyObject *Py_UNUSED(ignored))
+{
+	PyObject *type = (PyObject *)pytalloc_GetBaseObjectType();
+	Py_INCREF(type);
+	return type;
+}
+
+static PyObject *testpytalloc_reference(PyObject *mod, PyObject *args) {
+	PyObject *source = NULL;
+	void *ptr;
+
+	if (!PyArg_ParseTuple(args, "O!", pytalloc_GetObjectType(), &source))
+		return NULL;
+
+	ptr = pytalloc_get_ptr(source);
+	return pytalloc_reference_ex(pytalloc_GetObjectType(), ptr, ptr);
+}
+
+static PyObject *testpytalloc_base_reference(PyObject *mod, PyObject *args) {
+	PyObject *source = NULL;
+	void *mem_ctx;
+
+	if (!PyArg_ParseTuple(args, "O!", pytalloc_GetBaseObjectType(), &source)) {
+		return NULL;
+	}
+	mem_ctx = pytalloc_get_mem_ctx(source);
+	return pytalloc_reference_ex(pytalloc_GetBaseObjectType(), mem_ctx, mem_ctx);
+}
+
+static PyMethodDef test_talloc_methods[] = {
+	{ "new", (PyCFunction)testpytalloc_new, METH_NOARGS,
+		"create a talloc Object with a testing string"},
+	{ "get_object_type", (PyCFunction)testpytalloc_get_object_type, METH_NOARGS,
+		"call pytalloc_GetObjectType"},
+	{ "base_new", (PyCFunction)testpytalloc_base_new, METH_NOARGS,
+		"create a talloc BaseObject with a testing string"},
+	{ "base_get_object_type", (PyCFunction)testpytalloc_base_get_object_type, METH_NOARGS,
+		"call pytalloc_GetBaseObjectType"},
+	{ "reference", (PyCFunction)testpytalloc_reference, METH_VARARGS,
+		"call pytalloc_reference_ex"},
+	{ "base_reference", (PyCFunction)testpytalloc_base_reference, METH_VARARGS,
+		"call pytalloc_reference_ex"},
+	{0}
+};
+
+static PyTypeObject DObject_Type;
+
+static int dobject_destructor(void *ptr)
+{
+	PyObject *destructor_func = *talloc_get_type(ptr, PyObject*);
+	PyObject *ret;
+	ret = PyObject_CallObject(destructor_func, NULL);
+	Py_DECREF(destructor_func);
+	if (ret == NULL) {
+		PyErr_Print();
+	} else {
+		Py_DECREF(ret);
+	}
+	return 0;
+}
+
+static PyObject *dobject_new(PyTypeObject *type, PyObject *args, PyObject *kwargs)
+{
+	PyObject *destructor_func = NULL;
+	PyObject **obj;
+
+	if
(!PyArg_ParseTuple(args, "O", &destructor_func)) + return NULL; + Py_INCREF(destructor_func); + + obj = talloc(NULL, PyObject*); + *obj = destructor_func; + + talloc_set_destructor((void*)obj, dobject_destructor); + return pytalloc_steal(&DObject_Type, obj); +} + +static PyTypeObject DObject_Type = { + .tp_name = "_test_pytalloc.DObject", + .tp_basicsize = sizeof(pytalloc_Object), + .tp_methods = NULL, + .tp_new = dobject_new, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_doc = "test talloc object that calls a function when underlying data is freed\n", +}; + +static PyTypeObject DBaseObject_Type; + +static int d_base_object_destructor(void *ptr) +{ + PyObject *destructor_func = *talloc_get_type(ptr, PyObject*); + PyObject *ret; + ret = PyObject_CallObject(destructor_func, NULL); + Py_DECREF(destructor_func); + if (ret == NULL) { + PyErr_Print(); + } else { + Py_DECREF(ret); + } + return 0; +} + +static PyObject *d_base_object_new(PyTypeObject *type, PyObject *args, PyObject *kwargs) +{ + PyObject *destructor_func = NULL; + PyObject **obj; + + if (!PyArg_ParseTuple(args, "O", &destructor_func)) + return NULL; + Py_INCREF(destructor_func); + + obj = talloc(NULL, PyObject*); + *obj = destructor_func; + + talloc_set_destructor((void*)obj, d_base_object_destructor); + return pytalloc_steal(&DBaseObject_Type, obj); +} + +static PyTypeObject DBaseObject_Type = { + .tp_name = "_test_pytalloc.DBaseObject", + .tp_methods = NULL, + .tp_new = d_base_object_new, + .tp_flags = Py_TPFLAGS_DEFAULT, + .tp_doc = "test talloc object that calls a function when underlying data is freed\n", +}; + +#define MODULE_DOC PyDoc_STR("Test utility module for pytalloc") + +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + .m_name = "_test_pytalloc", + .m_doc = PyDoc_STR("Test utility module for pytalloc"), + .m_size = -1, + .m_methods = test_talloc_methods, +}; + +static PyObject *module_init(void); +static PyObject *module_init(void) +{ + PyObject *m; + + DObject_Type.tp_base = pytalloc_GetObjectType(); + if (PyType_Ready(&DObject_Type) < 0) { + return NULL; + } + + DBaseObject_Type.tp_basicsize = pytalloc_BaseObject_size(); + DBaseObject_Type.tp_base = pytalloc_GetBaseObjectType(); + if (PyType_Ready(&DBaseObject_Type) < 0) { + return NULL; + } + + m = PyModule_Create(&moduledef); + + if (m == NULL) { + return NULL; + } + + Py_INCREF(&DObject_Type); + Py_INCREF(DObject_Type.tp_base); + PyModule_AddObject(m, "DObject", (PyObject *)&DObject_Type); + + Py_INCREF(&DBaseObject_Type); + Py_INCREF(DBaseObject_Type.tp_base); + PyModule_AddObject(m, "DBaseObject", (PyObject *)&DBaseObject_Type); + + return m; +} + + +PyMODINIT_FUNC PyInit__test_pytalloc(void); +PyMODINIT_FUNC PyInit__test_pytalloc(void) +{ + return module_init(); +} diff --git a/test_pytalloc.py b/test_pytalloc.py new file mode 100644 index 0000000..809510f --- /dev/null +++ b/test_pytalloc.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python3 +# Simple tests for the talloc python bindings. 
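+#
+# A minimal usage sketch of the module exercised below (assuming a build
+# tree that puts the talloc bindings and the _test_pytalloc helper module
+# on sys.path):
+#
+#   import talloc, _test_pytalloc
+#   obj = _test_pytalloc.new()    # a talloc.Object wrapping a test string
+#   talloc.total_blocks(obj)      # -> 2: the string plus its talloc name
+#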
+# Copyright (C) 2015 Petr Viktorin <pviktori@redhat.com> + +import unittest +import subprocess +import sys +import gc + +import talloc +import _test_pytalloc + + +def dummy_func(): + pass + + +class TallocTests(unittest.TestCase): + + def test_report_full(self): + # report_full is hardcoded to print to stdout, so use a subprocess + process = subprocess.Popen([ + sys.executable, '-c', + """if True: + import talloc, _test_pytalloc + obj = _test_pytalloc.new() + talloc.report_full(obj) + """ + ], stdout=subprocess.PIPE) + output, stderr = process.communicate() + output = str(output) + self.assertTrue("full talloc report on 'talloc.Object" in output) + self.assertTrue("This is a test string" in output) + + def test_totalblocks(self): + obj = _test_pytalloc.new() + # Two blocks: the string, and the name + self.assertEqual(talloc.total_blocks(obj), 2) + + def test_repr(self): + obj = _test_pytalloc.new() + prefix = '<talloc.Object talloc object at' + self.assertTrue(repr(obj).startswith(prefix)) + self.assertEqual(repr(obj), str(obj)) + + def test_base_repr(self): + obj = _test_pytalloc.base_new() + prefix = '<talloc.BaseObject talloc based object at' + self.assertTrue(repr(obj).startswith(prefix)) + self.assertEqual(repr(obj), str(obj)) + + def test_destructor(self): + # Check correct lifetime of the talloc'd data + lst = [] + obj = _test_pytalloc.DObject(lambda: lst.append('dead')) + self.assertEqual(lst, []) + del obj + gc.collect() + self.assertEqual(lst, ['dead']) + + def test_base_destructor(self): + # Check correct lifetime of the talloc'd data + lst = [] + obj = _test_pytalloc.DBaseObject(lambda: lst.append('dead')) + self.assertEqual(lst, []) + del obj + gc.collect() + self.assertEqual(lst, ['dead']) + + +class TallocComparisonTests(unittest.TestCase): + + def test_compare_same(self): + obj1 = _test_pytalloc.new() + self.assertTrue(obj1 == obj1) + self.assertFalse(obj1 != obj1) + self.assertTrue(obj1 <= obj1) + self.assertFalse(obj1 < obj1) + self.assertTrue(obj1 >= obj1) + self.assertFalse(obj1 > obj1) + + def test_compare_different(self): + # object comparison is consistent + obj1, obj2 = sorted([ + _test_pytalloc.new(), + _test_pytalloc.new()]) + self.assertFalse(obj1 == obj2) + self.assertTrue(obj1 != obj2) + self.assertTrue(obj1 <= obj2) + self.assertTrue(obj1 < obj2) + self.assertFalse(obj1 >= obj2) + self.assertFalse(obj1 > obj2) + + def test_compare_different_types(self): + # object comparison falls back to comparing types + if sys.version_info >= (3, 0): + # In Python 3, types are unorderable -- nothing to test + return + if talloc.Object < _test_pytalloc.DObject: + obj1 = _test_pytalloc.new() + obj2 = _test_pytalloc.DObject(dummy_func) + else: + obj2 = _test_pytalloc.new() + obj1 = _test_pytalloc.DObject(dummy_func) + self.assertFalse(obj1 == obj2) + self.assertTrue(obj1 != obj2) + self.assertTrue(obj1 <= obj2) + self.assertTrue(obj1 < obj2) + self.assertFalse(obj1 >= obj2) + self.assertFalse(obj1 > obj2) + + +class TallocBaseComparisonTests(unittest.TestCase): + + def test_compare_same(self): + obj1 = _test_pytalloc.base_new() + self.assertTrue(obj1 == obj1) + self.assertFalse(obj1 != obj1) + self.assertTrue(obj1 <= obj1) + self.assertFalse(obj1 < obj1) + self.assertTrue(obj1 >= obj1) + self.assertFalse(obj1 > obj1) + + def test_compare_different(self): + # object comparison is consistent + obj1, obj2 = sorted([ + _test_pytalloc.base_new(), + _test_pytalloc.base_new()]) + self.assertFalse(obj1 == obj2) + self.assertTrue(obj1 != obj2) + self.assertTrue(obj1 <= obj2) + 
self.assertTrue(obj1 < obj2) + self.assertFalse(obj1 >= obj2) + self.assertFalse(obj1 > obj2) + + def test_compare_different_types(self): + # object comparison falls back to comparing types + if sys.version_info >= (3, 0): + # In Python 3, types are unorderable -- nothing to test + return + if talloc.BaseObject < _test_pytalloc.DBaseObject: + obj1 = _test_pytalloc.base_new() + obj2 = _test_pytalloc.DBaseObject(dummy_func) + else: + obj2 = _test_pytalloc.base_new() + obj1 = _test_pytalloc.DBaseObject(dummy_func) + self.assertFalse(obj1 == obj2) + self.assertTrue(obj1 != obj2) + self.assertTrue(obj1 <= obj2) + self.assertTrue(obj1 < obj2) + self.assertFalse(obj1 >= obj2) + self.assertFalse(obj1 > obj2) + + +class TallocUtilTests(unittest.TestCase): + + def test_get_type(self): + self.assertTrue(talloc.Object is _test_pytalloc.get_object_type()) + + def test_reference(self): + # Check correct lifetime of the talloc'd data with multiple references + lst = [] + obj = _test_pytalloc.DObject(lambda: lst.append('dead')) + ref = _test_pytalloc.reference(obj) + del obj + gc.collect() + self.assertEqual(lst, []) + del ref + gc.collect() + self.assertEqual(lst, ['dead']) + + def test_get_base_type(self): + self.assertTrue(talloc.BaseObject is _test_pytalloc.base_get_object_type()) + + def test_base_reference(self): + # Check correct lifetime of the talloc'd data with multiple references + lst = [] + obj = _test_pytalloc.DBaseObject(lambda: lst.append('dead')) + ref = _test_pytalloc.base_reference(obj) + del obj + gc.collect() + self.assertEqual(lst, []) + del ref + gc.collect() + self.assertEqual(lst, ['dead']) + + +if __name__ == '__main__': + unittest.TestProgram() diff --git a/testsuite.c b/testsuite.c new file mode 100644 index 0000000..282ebc6 --- /dev/null +++ b/testsuite.c @@ -0,0 +1,2288 @@ +/* + Unix SMB/CIFS implementation. + + local testing of talloc routines. + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include "replace.h" +#include "system/time.h" +#include <talloc.h> + +#ifdef HAVE_PTHREAD +#include <pthread.h> +#endif + +#include <unistd.h> +#include <sys/wait.h> + +#ifdef NDEBUG +#undef NDEBUG +#endif + +#include <assert.h> + +#include "talloc_testsuite.h" + +static struct timeval private_timeval_current(void) +{ + struct timeval tv; + gettimeofday(&tv, NULL); + return tv; +} + +static double private_timeval_elapsed(struct timeval *tv) +{ + struct timeval tv2 = private_timeval_current(); + return (tv2.tv_sec - tv->tv_sec) + + (tv2.tv_usec - tv->tv_usec)*1.0e-6; +} + +#define torture_assert(test, expr, str) if (!(expr)) { \ + printf("failure: %s [\n%s: Expression %s failed: %s\n]\n", \ + test, __location__, #expr, str); \ + return false; \ +} + +#define torture_assert_str_equal(test, arg1, arg2, desc) \ + if (arg1 == NULL && arg2 == NULL) { /* OK, both NULL == equal */ \ + } else if (arg1 == NULL || arg2 == NULL) { \ + return false; \ + } else if (strcmp(arg1, arg2)) { \ + printf("failure: %s [\n%s: Expected %s, got %s: %s\n]\n", \ + test, __location__, arg1, arg2, desc); \ + return false; \ + } + +#define CHECK_SIZE(test, ptr, tsize) do { \ + if (talloc_total_size(ptr) != (tsize)) { \ + printf("failed: %s [\n%s: wrong '%s' tree size: got %u expected %u\n]\n", \ + test, __location__, #ptr, \ + (unsigned)talloc_total_size(ptr), \ + (unsigned)tsize); \ + talloc_report_full(ptr, stdout); \ + return false; \ + } \ +} while (0) + +#define CHECK_BLOCKS(test, ptr, tblocks) do { \ + if (talloc_total_blocks(ptr) != (tblocks)) { \ + printf("failed: %s [\n%s: wrong '%s' tree blocks: got %u expected %u\n]\n", \ + test, __location__, #ptr, \ + (unsigned)talloc_total_blocks(ptr), \ + (unsigned)tblocks); \ + talloc_report_full(ptr, stdout); \ + return false; \ + } \ +} while (0) + +#define CHECK_PARENT(test, ptr, parent) do { \ + if (talloc_parent(ptr) != (parent)) { \ + printf("failed: %s [\n%s: '%s' has wrong parent: got %p expected %p\n]\n", \ + test, __location__, #ptr, \ + talloc_parent(ptr), \ + (parent)); \ + talloc_report_full(ptr, stdout); \ + talloc_report_full(parent, stdout); \ + talloc_report_full(NULL, stdout); \ + return false; \ + } \ +} while (0) + +static unsigned int test_abort_count; + +#if 0 +static void test_abort_fn(const char *reason) +{ + printf("# test_abort_fn(%s)\n", reason); + test_abort_count++; +} + +static void test_abort_start(void) +{ + test_abort_count = 0; + talloc_set_abort_fn(test_abort_fn); +} +#endif + +static void test_abort_stop(void) +{ + test_abort_count = 0; + talloc_set_abort_fn(NULL); +} + +static void test_log_stdout(const char *message) +{ + fprintf(stdout, "%s", message); +} + +/* + test references +*/ +static bool test_ref1(void) +{ + void *root, *p1, *p2, *ref, *r1; + + printf("test: ref1\n# SINGLE REFERENCE FREE\n"); + + root = talloc_named_const(NULL, 0, "root"); + p1 = talloc_named_const(root, 1, "p1"); + p2 = talloc_named_const(p1, 1, "p2"); + talloc_named_const(p1, 1, "x1"); + talloc_named_const(p1, 2, "x2"); + talloc_named_const(p1, 3, "x3"); + + r1 = talloc_named_const(root, 1, "r1"); + ref = talloc_reference(r1, p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref1", p1, 5); + CHECK_BLOCKS("ref1", p2, 1); + CHECK_BLOCKS("ref1", ref, 1); + CHECK_BLOCKS("ref1", r1, 2); + + fprintf(stderr, "Freeing p2\n"); + talloc_unlink(r1, p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref1", p1, 5); + CHECK_BLOCKS("ref1", p2, 1); + CHECK_BLOCKS("ref1", r1, 1); + + fprintf(stderr, "Freeing p1\n"); + talloc_free(p1); + 
talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref1", r1, 1); + + fprintf(stderr, "Freeing r1\n"); + talloc_free(r1); + talloc_report_full(NULL, stderr); + + fprintf(stderr, "Testing NULL\n"); + if (talloc_reference(root, NULL)) { + return false; + } + + CHECK_BLOCKS("ref1", root, 1); + + CHECK_SIZE("ref1", root, 0); + + talloc_free(root); + printf("success: ref1\n"); + return true; +} + +/* + test references +*/ +static bool test_ref2(void) +{ + void *root, *p1, *p2, *ref, *r1; + + printf("test: ref2\n# DOUBLE REFERENCE FREE\n"); + root = talloc_named_const(NULL, 0, "root"); + p1 = talloc_named_const(root, 1, "p1"); + talloc_named_const(p1, 1, "x1"); + talloc_named_const(p1, 1, "x2"); + talloc_named_const(p1, 1, "x3"); + p2 = talloc_named_const(p1, 1, "p2"); + + r1 = talloc_named_const(root, 1, "r1"); + ref = talloc_reference(r1, p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref2", p1, 5); + CHECK_BLOCKS("ref2", p2, 1); + CHECK_BLOCKS("ref2", r1, 2); + + fprintf(stderr, "Freeing ref\n"); + talloc_unlink(r1, ref); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref2", p1, 5); + CHECK_BLOCKS("ref2", p2, 1); + CHECK_BLOCKS("ref2", r1, 1); + + fprintf(stderr, "Freeing p2\n"); + talloc_free(p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref2", p1, 4); + CHECK_BLOCKS("ref2", r1, 1); + + fprintf(stderr, "Freeing p1\n"); + talloc_free(p1); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref2", r1, 1); + + fprintf(stderr, "Freeing r1\n"); + talloc_free(r1); + talloc_report_full(root, stderr); + + CHECK_SIZE("ref2", root, 0); + + talloc_free(root); + printf("success: ref2\n"); + return true; +} + +/* + test references +*/ +static bool test_ref3(void) +{ + void *root, *p1, *p2, *ref, *r1; + + printf("test: ref3\n# PARENT REFERENCE FREE\n"); + + root = talloc_named_const(NULL, 0, "root"); + p1 = talloc_named_const(root, 1, "p1"); + p2 = talloc_named_const(root, 1, "p2"); + r1 = talloc_named_const(p1, 1, "r1"); + ref = talloc_reference(p2, r1); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref3", p1, 2); + CHECK_BLOCKS("ref3", p2, 2); + CHECK_BLOCKS("ref3", r1, 1); + CHECK_BLOCKS("ref3", ref, 1); + + fprintf(stderr, "Freeing p1\n"); + talloc_free(p1); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref3", p2, 2); + CHECK_BLOCKS("ref3", r1, 1); + + fprintf(stderr, "Freeing p2\n"); + talloc_free(p2); + talloc_report_full(root, stderr); + + CHECK_SIZE("ref3", root, 0); + + talloc_free(root); + + printf("success: ref3\n"); + return true; +} + +/* + test references +*/ +static bool test_ref4(void) +{ + void *root, *p1, *p2, *ref, *r1; + + printf("test: ref4\n# REFERRER REFERENCE FREE\n"); + + root = talloc_named_const(NULL, 0, "root"); + p1 = talloc_named_const(root, 1, "p1"); + talloc_named_const(p1, 1, "x1"); + talloc_named_const(p1, 1, "x2"); + talloc_named_const(p1, 1, "x3"); + p2 = talloc_named_const(p1, 1, "p2"); + + r1 = talloc_named_const(root, 1, "r1"); + ref = talloc_reference(r1, p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref4", p1, 5); + CHECK_BLOCKS("ref4", p2, 1); + CHECK_BLOCKS("ref4", ref, 1); + CHECK_BLOCKS("ref4", r1, 2); + + fprintf(stderr, "Freeing r1\n"); + talloc_free(r1); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref4", p1, 5); + CHECK_BLOCKS("ref4", p2, 1); + + fprintf(stderr, "Freeing p2\n"); + talloc_free(p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("ref4", p1, 4); + + fprintf(stderr, "Freeing p1\n"); + talloc_free(p1); + talloc_report_full(root, stderr); + + CHECK_SIZE("ref4", root, 
0); + + talloc_free(root); + + printf("success: ref4\n"); + return true; +} + + +/* + test references +*/ +static bool test_unlink1(void) +{ + void *root, *p1, *p2, *ref, *r1; + + printf("test: unlink\n# UNLINK\n"); + + root = talloc_named_const(NULL, 0, "root"); + p1 = talloc_named_const(root, 1, "p1"); + talloc_named_const(p1, 1, "x1"); + talloc_named_const(p1, 1, "x2"); + talloc_named_const(p1, 1, "x3"); + p2 = talloc_named_const(p1, 1, "p2"); + + r1 = talloc_named_const(p1, 1, "r1"); + ref = talloc_reference(r1, p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("unlink", p1, 7); + CHECK_BLOCKS("unlink", p2, 1); + CHECK_BLOCKS("unlink", ref, 1); + CHECK_BLOCKS("unlink", r1, 2); + + fprintf(stderr, "Unreferencing r1\n"); + talloc_unlink(r1, p2); + talloc_report_full(root, stderr); + + CHECK_BLOCKS("unlink", p1, 6); + CHECK_BLOCKS("unlink", p2, 1); + CHECK_BLOCKS("unlink", r1, 1); + + fprintf(stderr, "Freeing p1\n"); + talloc_free(p1); + talloc_report_full(root, stderr); + + CHECK_SIZE("unlink", root, 0); + + talloc_free(root); + + printf("success: unlink\n"); + return true; +} + +static int fail_destructor(void *ptr) +{ + return -1; +} + +/* + miscellaneous tests to try to get a higher test coverage percentage +*/ +static bool test_misc(void) +{ + void *root, *p1; + char *p2; + double *d; + const char *name; + + printf("test: misc\n# MISCELLANEOUS\n"); + + root = talloc_new(NULL); + + p1 = talloc_size(root, 0x7fffffff); + torture_assert("misc", !p1, "failed: large talloc allowed\n"); + + p1 = talloc_strdup(root, "foo"); + talloc_increase_ref_count(p1); + talloc_increase_ref_count(p1); + talloc_increase_ref_count(p1); + CHECK_BLOCKS("misc", p1, 1); + CHECK_BLOCKS("misc", root, 2); + talloc_unlink(NULL, p1); + CHECK_BLOCKS("misc", p1, 1); + CHECK_BLOCKS("misc", root, 2); + talloc_unlink(NULL, p1); + CHECK_BLOCKS("misc", p1, 1); + CHECK_BLOCKS("misc", root, 2); + p2 = talloc_strdup(p1, "foo"); + torture_assert("misc", talloc_unlink(root, p2) == -1, + "failed: talloc_unlink() of non-reference context should return -1\n"); + torture_assert("misc", talloc_unlink(p1, p2) == 0, + "failed: talloc_unlink() of parent should succeed\n"); + talloc_unlink(NULL, p1); + CHECK_BLOCKS("misc", p1, 1); + CHECK_BLOCKS("misc", root, 2); + + name = talloc_set_name(p1, "my name is %s", "foo"); + torture_assert_str_equal("misc", talloc_get_name(p1), "my name is foo", + "failed: wrong name after talloc_set_name(my name is foo)"); + torture_assert_str_equal("misc", talloc_get_name(p1), name, + "failed: wrong name after talloc_set_name(my name is foo)"); + CHECK_BLOCKS("misc", p1, 2); + CHECK_BLOCKS("misc", root, 3); + + talloc_set_name_const(p1, NULL); + torture_assert_str_equal ("misc", talloc_get_name(p1), "UNNAMED", + "failed: wrong name after talloc_set_name(NULL)"); + CHECK_BLOCKS("misc", p1, 2); + CHECK_BLOCKS("misc", root, 3); + + torture_assert("misc", talloc_free(NULL) == -1, + "talloc_free(NULL) should give -1\n"); + + talloc_set_destructor(p1, fail_destructor); + torture_assert("misc", talloc_free(p1) == -1, + "Failed destructor should cause talloc_free to fail\n"); + talloc_set_destructor(p1, NULL); + + talloc_report(root, stderr); + + + p2 = (char *)talloc_zero_size(p1, 20); + torture_assert("misc", p2[19] == 0, "Failed to give zero memory\n"); + talloc_free(p2); + + torture_assert("misc", talloc_strdup(root, NULL) == NULL, + "failed: strdup on NULL should give NULL\n"); + + p2 = talloc_strndup(p1, "foo", 2); + torture_assert("misc", strcmp("fo", p2) == 0, + "strndup doesn't work\n"); + p2 = 
talloc_asprintf_append_buffer(p2, "o%c", 'd'); + torture_assert("misc", strcmp("food", p2) == 0, + "talloc_asprintf_append_buffer doesn't work\n"); + CHECK_BLOCKS("misc", p2, 1); + CHECK_BLOCKS("misc", p1, 3); + + p2 = talloc_asprintf_append_buffer(NULL, "hello %s", "world"); + torture_assert("misc", strcmp("hello world", p2) == 0, + "talloc_asprintf_append_buffer doesn't work\n"); + CHECK_BLOCKS("misc", p2, 1); + CHECK_BLOCKS("misc", p1, 3); + talloc_free(p2); + + d = talloc_array(p1, double, 0x20000000); + torture_assert("misc", !d, "failed: integer overflow not detected\n"); + + d = talloc_realloc(p1, d, double, 0x20000000); + torture_assert("misc", !d, "failed: integer overflow not detected\n"); + + talloc_free(p1); + CHECK_BLOCKS("misc", root, 1); + + p1 = talloc_named(root, 100, "%d bytes", 100); + CHECK_BLOCKS("misc", p1, 2); + CHECK_BLOCKS("misc", root, 3); + talloc_unlink(root, p1); + + p1 = talloc_init("%d bytes", 200); + p2 = talloc_asprintf(p1, "my test '%s'", "string"); + torture_assert_str_equal("misc", p2, "my test 'string'", + "failed: talloc_asprintf(\"my test '%%s'\", \"string\") gave: \"%s\""); + CHECK_BLOCKS("misc", p1, 3); + CHECK_SIZE("misc", p2, 17); + CHECK_BLOCKS("misc", root, 1); + talloc_unlink(NULL, p1); + + p1 = talloc_named_const(root, 10, "p1"); + p2 = (char *)talloc_named_const(root, 20, "p2"); + (void)talloc_reference(p1, p2); + talloc_report_full(root, stderr); + talloc_unlink(root, p2); + talloc_report_full(root, stderr); + CHECK_BLOCKS("misc", p2, 1); + CHECK_BLOCKS("misc", p1, 2); + CHECK_BLOCKS("misc", root, 3); + talloc_unlink(p1, p2); + talloc_unlink(root, p1); + + p1 = talloc_named_const(root, 10, "p1"); + p2 = (char *)talloc_named_const(root, 20, "p2"); + (void)talloc_reference(NULL, p2); + talloc_report_full(root, stderr); + talloc_unlink(root, p2); + talloc_report_full(root, stderr); + CHECK_BLOCKS("misc", p2, 1); + CHECK_BLOCKS("misc", p1, 1); + CHECK_BLOCKS("misc", root, 2); + talloc_unlink(NULL, p2); + talloc_unlink(root, p1); + + /* Test that talloc_unlink is a no-op */ + + torture_assert("misc", talloc_unlink(root, NULL) == -1, + "failed: talloc_unlink(root, NULL) == -1\n"); + + talloc_report(root, stderr); + talloc_report(NULL, stderr); + + CHECK_SIZE("misc", root, 0); + + talloc_free(root); + + CHECK_SIZE("misc", NULL, 0); + + talloc_enable_null_tracking_no_autofree(); + talloc_enable_leak_report(); + talloc_enable_leak_report_full(); + + printf("success: misc\n"); + + return true; +} + + +/* + test realloc +*/ +static bool test_realloc(void) +{ + void *root, *p1, *p2; + + printf("test: realloc\n# REALLOC\n"); + + root = talloc_new(NULL); + + p1 = talloc_size(root, 10); + CHECK_SIZE("realloc", p1, 10); + + p1 = talloc_realloc_size(NULL, p1, 20); + CHECK_SIZE("realloc", p1, 20); + + talloc_new(p1); + + p2 = talloc_realloc_size(p1, NULL, 30); + + talloc_new(p1); + + p2 = talloc_realloc_size(p1, p2, 40); + + CHECK_SIZE("realloc", p2, 40); + CHECK_SIZE("realloc", root, 60); + CHECK_BLOCKS("realloc", p1, 4); + + p1 = talloc_realloc_size(NULL, p1, 20); + CHECK_SIZE("realloc", p1, 60); + + talloc_increase_ref_count(p2); + torture_assert("realloc", talloc_realloc_size(NULL, p2, 5) == NULL, + "failed: talloc_realloc() on a referenced pointer should fail\n"); + CHECK_BLOCKS("realloc", p1, 4); + + talloc_realloc_size(NULL, p2, 0); + talloc_realloc_size(NULL, p2, 0); + CHECK_BLOCKS("realloc", p1, 4); + talloc_realloc_size(p1, p2, 0); + CHECK_BLOCKS("realloc", p1, 3); + + torture_assert("realloc", talloc_realloc_size(NULL, p1, 0x7fffffff) == NULL, + 
"failed: oversize talloc should fail\n"); + + talloc_realloc_size(NULL, p1, 0); + CHECK_BLOCKS("realloc", root, 4); + talloc_realloc_size(root, p1, 0); + CHECK_BLOCKS("realloc", root, 1); + + CHECK_SIZE("realloc", root, 0); + + talloc_free(root); + + printf("success: realloc\n"); + + return true; +} + +/* + test realloc with a child +*/ +static bool test_realloc_child(void) +{ + void *root; + struct el2 { + const char *name; + } *el2, *el2_2, *el2_3, **el_list_save; + struct el1 { + int count; + struct el2 **list, **list2, **list3; + } *el1; + + printf("test: REALLOC WITH CHILD\n"); + + root = talloc_new(NULL); + + el1 = talloc(root, struct el1); + el1->list = talloc(el1, struct el2 *); + el1->list[0] = talloc(el1->list, struct el2); + el1->list[0]->name = talloc_strdup(el1->list[0], "testing"); + + el1->list2 = talloc(el1, struct el2 *); + el1->list2[0] = talloc(el1->list2, struct el2); + el1->list2[0]->name = talloc_strdup(el1->list2[0], "testing2"); + + el1->list3 = talloc(el1, struct el2 *); + el1->list3[0] = talloc(el1->list3, struct el2); + el1->list3[0]->name = talloc_strdup(el1->list3[0], "testing2"); + + el2 = talloc(el1->list, struct el2); + CHECK_PARENT("el2", el2, el1->list); + el2_2 = talloc(el1->list2, struct el2); + CHECK_PARENT("el2", el2_2, el1->list2); + el2_3 = talloc(el1->list3, struct el2); + CHECK_PARENT("el2", el2_3, el1->list3); + + el_list_save = el1->list; + el1->list = talloc_realloc(el1, el1->list, struct el2 *, 100); + if (el1->list == el_list_save) { + printf("failure: talloc_realloc didn't move pointer"); + return false; + } + + CHECK_PARENT("el1_after_realloc", el1->list, el1); + el1->list2 = talloc_realloc(el1, el1->list2, struct el2 *, 200); + CHECK_PARENT("el1_after_realloc", el1->list2, el1); + el1->list3 = talloc_realloc(el1, el1->list3, struct el2 *, 300); + CHECK_PARENT("el1_after_realloc", el1->list3, el1); + + CHECK_PARENT("el2", el2, el1->list); + CHECK_PARENT("el2", el2_2, el1->list2); + CHECK_PARENT("el2", el2_3, el1->list3); + + /* Finally check realloc with multiple children */ + el1 = talloc_realloc(root, el1, struct el1, 100); + CHECK_PARENT("el1->list", el1->list, el1); + CHECK_PARENT("el1->list2", el1->list2, el1); + CHECK_PARENT("el1->list3", el1->list3, el1); + + talloc_free(root); + + printf("success: REALLOC WITH CHILD\n"); + return true; +} + +/* + test type checking +*/ +static bool test_type(void) +{ + void *root; + struct el1 { + int count; + }; + struct el2 { + int count; + }; + struct el1 *el1; + + printf("test: type\n# talloc type checking\n"); + + root = talloc_new(NULL); + + el1 = talloc(root, struct el1); + + el1->count = 1; + + torture_assert("type", talloc_get_type(el1, struct el1) == el1, + "type check failed on el1\n"); + torture_assert("type", talloc_get_type(el1, struct el2) == NULL, + "type check failed on el1 with el2\n"); + talloc_set_type(el1, struct el2); + torture_assert("type", talloc_get_type(el1, struct el2) == (struct el2 *)el1, + "type set failed on el1 with el2\n"); + + talloc_free(root); + + printf("success: type\n"); + return true; +} + +/* + test steal +*/ +static bool test_steal(void) +{ + void *root, *p1, *p2; + + printf("test: steal\n# STEAL\n"); + + root = talloc_new(NULL); + + p1 = talloc_array(root, char, 10); + CHECK_SIZE("steal", p1, 10); + + p2 = talloc_realloc(root, NULL, char, 20); + CHECK_SIZE("steal", p1, 10); + CHECK_SIZE("steal", root, 30); + + torture_assert("steal", talloc_steal(p1, NULL) == NULL, + "failed: stealing NULL should give NULL\n"); + + torture_assert("steal", talloc_steal(p1, 
p1) == p1, + "failed: stealing to ourselves is a nop\n"); + CHECK_BLOCKS("steal", root, 3); + CHECK_SIZE("steal", root, 30); + + talloc_steal(NULL, p1); + talloc_steal(NULL, p2); + CHECK_BLOCKS("steal", root, 1); + CHECK_SIZE("steal", root, 0); + + talloc_free(p1); + talloc_steal(root, p2); + CHECK_BLOCKS("steal", root, 2); + CHECK_SIZE("steal", root, 20); + + talloc_free(p2); + + CHECK_BLOCKS("steal", root, 1); + CHECK_SIZE("steal", root, 0); + + talloc_free(root); + + p1 = talloc_size(NULL, 3); + talloc_report_full(NULL, stderr); + CHECK_SIZE("steal", NULL, 3); + talloc_free(p1); + + printf("success: steal\n"); + return true; +} + +/* + test move +*/ +static bool test_move(void) +{ + void *root; + struct t_move { + char *p; + int *x; + } *t1, *t2; + + printf("test: move\n# MOVE\n"); + + root = talloc_new(NULL); + + t1 = talloc(root, struct t_move); + t2 = talloc(root, struct t_move); + t1->p = talloc_strdup(t1, "foo"); + t1->x = talloc(t1, int); + *t1->x = 42; + + t2->p = talloc_move(t2, &t1->p); + t2->x = talloc_move(t2, &t1->x); + torture_assert("move", t1->p == NULL && t1->x == NULL && + strcmp(t2->p, "foo") == 0 && *t2->x == 42, + "talloc move failed"); + + talloc_free(root); + + printf("success: move\n"); + + return true; +} + +/* + test talloc_realloc_fn +*/ +static bool test_realloc_fn(void) +{ + void *root, *p1; + + printf("test: realloc_fn\n# talloc_realloc_fn\n"); + + root = talloc_new(NULL); + + p1 = talloc_realloc_fn(root, NULL, 10); + CHECK_BLOCKS("realloc_fn", root, 2); + CHECK_SIZE("realloc_fn", root, 10); + p1 = talloc_realloc_fn(root, p1, 20); + CHECK_BLOCKS("realloc_fn", root, 2); + CHECK_SIZE("realloc_fn", root, 20); + p1 = talloc_realloc_fn(root, p1, 0); + CHECK_BLOCKS("realloc_fn", root, 1); + CHECK_SIZE("realloc_fn", root, 0); + + talloc_free(root); + + printf("success: realloc_fn\n"); + return true; +} + + +static bool test_unref_reparent(void) +{ + void *root, *p1, *p2, *c1; + + printf("test: unref_reparent\n# UNREFERENCE AFTER PARENT FREED\n"); + + root = talloc_named_const(NULL, 0, "root"); + p1 = talloc_named_const(root, 1, "orig parent"); + p2 = talloc_named_const(root, 1, "parent by reference"); + + c1 = talloc_named_const(p1, 1, "child"); + talloc_reference(p2, c1); + + CHECK_PARENT("unref_reparent", c1, p1); + + talloc_free(p1); + + CHECK_PARENT("unref_reparent", c1, p2); + + talloc_unlink(p2, c1); + + CHECK_SIZE("unref_reparent", root, 1); + + talloc_free(p2); + talloc_free(root); + + printf("success: unref_reparent\n"); + return true; +} + +/* + measure the speed of talloc versus malloc +*/ +static bool test_speed(void) +{ + void *ctx = talloc_new(NULL); + unsigned count; + const int loop = 1000; + int i; + struct timeval tv; + + printf("test: speed\n# TALLOC VS MALLOC SPEED\n"); + + tv = private_timeval_current(); + count = 0; + do { + void *p1, *p2, *p3; + for (i=0;i<loop;i++) { + p1 = talloc_size(ctx, loop % 100); + p2 = talloc_strdup(p1, "foo bar"); + p3 = talloc_size(p1, 300); + (void)p2; + (void)p3; + talloc_free(p1); + } + count += 3 * loop; + } while (private_timeval_elapsed(&tv) < 5.0); + + fprintf(stderr, "talloc: %.0f ops/sec\n", count/private_timeval_elapsed(&tv)); + + talloc_free(ctx); + + ctx = talloc_pool(NULL, 1024); + + tv = private_timeval_current(); + count = 0; + do { + void *p1, *p2, *p3; + for (i=0;i<loop;i++) { + p1 = talloc_size(ctx, loop % 100); + p2 = talloc_strdup(p1, "foo bar"); + p3 = talloc_size(p1, 300); + (void)p2; + (void)p3; + talloc_free(p1); + } + count += 3 * loop; + } while (private_timeval_elapsed(&tv) < 5.0); + + 
talloc_free(ctx); + + fprintf(stderr, "talloc_pool: %.0f ops/sec\n", count/private_timeval_elapsed(&tv)); + + tv = private_timeval_current(); + count = 0; + do { + void *p1, *p2, *p3; + for (i=0;i<loop;i++) { + p1 = malloc(loop % 100); + p2 = strdup("foo bar"); + p3 = malloc(300); + free(p1); + free(p2); + free(p3); + } + count += 3 * loop; + } while (private_timeval_elapsed(&tv) < 5.0); + fprintf(stderr, "malloc: %.0f ops/sec\n", count/private_timeval_elapsed(&tv)); + + printf("success: speed\n"); + + return true; +} + +static bool test_lifeless(void) +{ + void *top = talloc_new(NULL); + char *parent, *child; + void *child_owner = talloc_new(NULL); + + printf("test: lifeless\n# TALLOC_UNLINK LOOP\n"); + + parent = talloc_strdup(top, "parent"); + child = talloc_strdup(parent, "child"); + (void)talloc_reference(child, parent); + (void)talloc_reference(child_owner, child); + talloc_report_full(top, stderr); + talloc_unlink(top, parent); + talloc_unlink(top, child); + talloc_report_full(top, stderr); + talloc_free(top); + talloc_free(child_owner); + talloc_free(child); + + printf("success: lifeless\n"); + return true; +} + +static int loop_destructor_count; + +static int test_loop_destructor(char *ptr) +{ + loop_destructor_count++; + return 0; +} + +static bool test_loop(void) +{ + void *top = talloc_new(NULL); + char *parent; + struct req1 { + char *req2, *req3; + } *req1; + + printf("test: loop\n# TALLOC LOOP DESTRUCTION\n"); + + parent = talloc_strdup(top, "parent"); + req1 = talloc(parent, struct req1); + req1->req2 = talloc_strdup(req1, "req2"); + talloc_set_destructor(req1->req2, test_loop_destructor); + req1->req3 = talloc_strdup(req1, "req3"); + (void)talloc_reference(req1->req3, req1); + talloc_report_full(top, stderr); + talloc_free(parent); + talloc_report_full(top, stderr); + talloc_report_full(NULL, stderr); + talloc_free(top); + + torture_assert("loop", loop_destructor_count == 1, + "FAILED TO FIRE LOOP DESTRUCTOR\n"); + loop_destructor_count = 0; + + printf("success: loop\n"); + return true; +} + +static int realloc_parent_destructor_count; + +static int test_realloc_parent_destructor(char *ptr) +{ + realloc_parent_destructor_count++; + return 0; +} + +static bool test_realloc_on_destructor_parent(void) +{ + void *top = talloc_new(NULL); + char *parent; + char *a, *b, *C, *D; + realloc_parent_destructor_count = 0; + + printf("test: free_for_exit\n# TALLOC FREE FOR EXIT\n"); + + parent = talloc_strdup(top, "parent"); + a = talloc_strdup(parent, "a"); + b = talloc_strdup(a, "b"); + C = talloc_strdup(a, "C"); + D = talloc_strdup(b, "D"); + talloc_set_destructor(D, test_realloc_parent_destructor); + /* Capitalised ones have destructors. 
+ * + * parent --> a -> b -> D + * -> c + */ + + a = talloc_realloc(parent, a, char, 2048); + + torture_assert("check talloc_realloc", a != NULL, "talloc_realloc failed"); + + talloc_set_destructor(C, test_realloc_parent_destructor); + /* + * parent --> a[2048] -> b -> D + * -> C + * + */ + + talloc_free(parent); + + torture_assert("check destructor realloc_parent_destructor", + realloc_parent_destructor_count == 2, + "FAILED TO FIRE free_for_exit_destructor\n"); + + + printf("success: free_for_exit\n"); + talloc_free(top); /* make ASAN happy */ + + return true; +} + +static int fail_destructor_str(char *ptr) +{ + return -1; +} + +static bool test_free_parent_deny_child(void) +{ + void *top = talloc_new(NULL); + char *level1; + char *level2; + char *level3; + + printf("test: free_parent_deny_child\n# TALLOC FREE PARENT DENY CHILD\n"); + + level1 = talloc_strdup(top, "level1"); + level2 = talloc_strdup(level1, "level2"); + level3 = talloc_strdup(level2, "level3"); + + talloc_set_destructor(level3, fail_destructor_str); + talloc_free(level1); + talloc_set_destructor(level3, NULL); + + CHECK_PARENT("free_parent_deny_child", level3, top); + + talloc_free(top); + + printf("success: free_parent_deny_child\n"); + return true; +} + +struct new_parent { + void *new_parent; + char val[20]; +}; + +static int reparenting_destructor(struct new_parent *np) +{ + talloc_set_destructor(np, NULL); + (void)talloc_move(np->new_parent, &np); + return -1; +} + +static bool test_free_parent_reparent_child(void) +{ + void *top = talloc_new(NULL); + char *level1; + char *alternate_level1; + char *level2; + struct new_parent *level3; + + printf("test: free_parent_reparent_child\n# " + "TALLOC FREE PARENT REPARENT CHILD\n"); + + level1 = talloc_strdup(top, "level1"); + alternate_level1 = talloc_strdup(top, "alternate_level1"); + level2 = talloc_strdup(level1, "level2"); + level3 = talloc(level2, struct new_parent); + level3->new_parent = alternate_level1; + memset(level3->val, 'x', sizeof(level3->val)); + + talloc_set_destructor(level3, reparenting_destructor); + talloc_free(level1); + + CHECK_PARENT("free_parent_reparent_child", + level3, alternate_level1); + + talloc_free(top); + + printf("success: free_parent_reparent_child\n"); + return true; +} + +static bool test_free_parent_reparent_child_in_pool(void) +{ + void *top = talloc_new(NULL); + char *level1; + char *alternate_level1; + char *level2; + void *pool; + struct new_parent *level3; + + printf("test: free_parent_reparent_child_in_pool\n# " + "TALLOC FREE PARENT REPARENT CHILD IN POOL\n"); + + pool = talloc_pool(top, 1024); + level1 = talloc_strdup(pool, "level1"); + alternate_level1 = talloc_strdup(top, "alternate_level1"); + level2 = talloc_strdup(level1, "level2"); + level3 = talloc(level2, struct new_parent); + level3->new_parent = alternate_level1; + memset(level3->val, 'x', sizeof(level3->val)); + + talloc_set_destructor(level3, reparenting_destructor); + talloc_free(level1); + talloc_set_destructor(level3, NULL); + + CHECK_PARENT("free_parent_reparent_child_in_pool", + level3, alternate_level1); + + /* Even freeing alternate_level1 should leave pool alone. 
*/ + talloc_free(alternate_level1); + talloc_free(top); + + printf("success: free_parent_reparent_child_in_pool\n"); + return true; +} + + +static bool test_talloc_ptrtype(void) +{ + void *top = talloc_new(NULL); + struct struct1 { + int foo; + int bar; + } *s1, *s2, **s3, ***s4; + const char *location1; + const char *location2; + const char *location3; + const char *location4; + + printf("test: ptrtype\n# TALLOC PTRTYPE\n"); + + s1 = talloc_ptrtype(top, s1);location1 = __location__; + + if (talloc_get_size(s1) != sizeof(struct struct1)) { + printf("failure: ptrtype [\n" + "talloc_ptrtype() allocated the wrong size %lu (should be %lu)\n" + "]\n", (unsigned long)talloc_get_size(s1), + (unsigned long)sizeof(struct struct1)); + return false; + } + + if (strcmp(location1, talloc_get_name(s1)) != 0) { + printf("failure: ptrtype [\n" + "talloc_ptrtype() sets the wrong name '%s' (should be '%s')\n]\n", + talloc_get_name(s1), location1); + return false; + } + + s2 = talloc_array_ptrtype(top, s2, 10);location2 = __location__; + + if (talloc_get_size(s2) != (sizeof(struct struct1) * 10)) { + printf("failure: ptrtype [\n" + "talloc_array_ptrtype() allocated the wrong size " + "%lu (should be %lu)\n]\n", + (unsigned long)talloc_get_size(s2), + (unsigned long)(sizeof(struct struct1)*10)); + return false; + } + + if (strcmp(location2, talloc_get_name(s2)) != 0) { + printf("failure: ptrtype [\n" + "talloc_array_ptrtype() sets the wrong name '%s' (should be '%s')\n]\n", + talloc_get_name(s2), location2); + return false; + } + + s3 = talloc_array_ptrtype(top, s3, 10);location3 = __location__; + + if (talloc_get_size(s3) != (sizeof(struct struct1 *) * 10)) { + printf("failure: ptrtype [\n" + "talloc_array_ptrtype() allocated the wrong size " + "%lu (should be %lu)\n]\n", + (unsigned long)talloc_get_size(s3), + (unsigned long)(sizeof(struct struct1 *)*10)); + return false; + } + + torture_assert_str_equal("ptrtype", location3, talloc_get_name(s3), + "talloc_array_ptrtype() sets the wrong name"); + + s4 = talloc_array_ptrtype(top, s4, 10);location4 = __location__; + + if (talloc_get_size(s4) != (sizeof(struct struct1 **) * 10)) { + printf("failure: ptrtype [\n" + "talloc_array_ptrtype() allocated the wrong size " + "%lu (should be %lu)\n]\n", + (unsigned long)talloc_get_size(s4), + (unsigned long)(sizeof(struct struct1 **)*10)); + return false; + } + + torture_assert_str_equal("ptrtype", location4, talloc_get_name(s4), + "talloc_array_ptrtype() sets the wrong name"); + + talloc_free(top); + + printf("success: ptrtype\n"); + return true; +} + +static int _test_talloc_free_in_destructor(void **ptr) +{ + talloc_free(*ptr); + return 0; +} + +static bool test_talloc_free_in_destructor(void) +{ + void *level0; + void *level1; + void *level2; + void *level3; + void *level4; + void **level5; + + printf("test: free_in_destructor\n# TALLOC FREE IN DESTRUCTOR\n"); + + level0 = talloc_new(NULL); + level1 = talloc_new(level0); + level2 = talloc_new(level1); + level3 = talloc_new(level2); + level4 = talloc_new(level3); + level5 = talloc(level4, void *); + + *level5 = level3; + (void)talloc_reference(level0, level3); + (void)talloc_reference(level3, level3); + (void)talloc_reference(level5, level3); + + talloc_set_destructor(level5, _test_talloc_free_in_destructor); + + talloc_free(level1); + + talloc_free(level0); + + talloc_free(level3); /* make ASAN happy */ + + printf("success: free_in_destructor\n"); + return true; +} + +static bool test_autofree(void) +{ +#if _SAMBA_BUILD_ < 4 + /* autofree test would kill smbtorture 
*/ + void *p; + printf("test: autofree\n# TALLOC AUTOFREE CONTEXT\n"); + + p = talloc_autofree_context(); + talloc_free(p); + + p = talloc_autofree_context(); + talloc_free(p); + + printf("success: autofree\n"); +#endif + return true; +} + +static bool test_pool(void) +{ + void *pool; + void *p1, *p2, *p3, *p4; + void *p2_2; + + pool = talloc_pool(NULL, 1024); + + p1 = talloc_size(pool, 80); + memset(p1, 0x11, talloc_get_size(p1)); + p2 = talloc_size(pool, 20); + memset(p2, 0x11, talloc_get_size(p2)); + p3 = talloc_size(p1, 50); + memset(p3, 0x11, talloc_get_size(p3)); + p4 = talloc_size(p3, 1000); + memset(p4, 0x11, talloc_get_size(p4)); + + p2_2 = talloc_realloc_size(pool, p2, 20+1); + torture_assert("pool realloc 20+1", p2_2 == p2, "failed: pointer changed"); + memset(p2, 0x11, talloc_get_size(p2)); + p2_2 = talloc_realloc_size(pool, p2, 20-1); + torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); + memset(p2, 0x11, talloc_get_size(p2)); + p2_2 = talloc_realloc_size(pool, p2, 20-1); + torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); + memset(p2, 0x11, talloc_get_size(p2)); + + talloc_free(p3); + + /* this should reclaim the memory of p4 and p3 */ + p2_2 = talloc_realloc_size(pool, p2, 400); + torture_assert("pool realloc 400", p2_2 == p2, "failed: pointer changed"); + memset(p2, 0x11, talloc_get_size(p2)); + + talloc_free(p1); + + /* this should reclaim the memory of p1 */ + p2_2 = talloc_realloc_size(pool, p2, 800); + torture_assert("pool realloc 800", p2_2 == p1, "failed: pointer not changed"); + p2 = p2_2; + memset(p2, 0x11, talloc_get_size(p2)); + + /* this should do a malloc */ + p2_2 = talloc_realloc_size(pool, p2, 1800); + torture_assert("pool realloc 1800", p2_2 != p2, "failed: pointer not changed"); + p2 = p2_2; + memset(p2, 0x11, talloc_get_size(p2)); + + /* this should reclaim the memory from the pool */ + p3 = talloc_size(pool, 80); + torture_assert("pool alloc 80", p3 == p1, "failed: pointer changed"); + memset(p3, 0x11, talloc_get_size(p3)); + + talloc_free(p2); + talloc_free(p3); + + p1 = talloc_size(pool, 80); + memset(p1, 0x11, talloc_get_size(p1)); + p2 = talloc_size(pool, 20); + memset(p2, 0x11, talloc_get_size(p2)); + + talloc_free(p1); + + p2_2 = talloc_realloc_size(pool, p2, 20-1); + torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); + memset(p2, 0x11, talloc_get_size(p2)); + p2_2 = talloc_realloc_size(pool, p2, 20-1); + torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed"); + memset(p2, 0x11, talloc_get_size(p2)); + + /* this should do a malloc */ + p2_2 = talloc_realloc_size(pool, p2, 1800); + torture_assert("pool realloc 1800", p2_2 != p2, "failed: pointer not changed"); + p2 = p2_2; + memset(p2, 0x11, talloc_get_size(p2)); + + /* this should reclaim the memory from the pool */ + p3 = talloc_size(pool, 800); + torture_assert("pool alloc 800", p3 == p1, "failed: pointer changed"); + memset(p3, 0x11, talloc_get_size(p3)); + + talloc_free(pool); + + return true; +} + +static bool test_pool_steal(void) +{ + void *root; + void *pool; + void *p1, *p2; + void *p1_2, *p2_2; + size_t hdr; + size_t ofs1, ofs2; + + root = talloc_new(NULL); + pool = talloc_pool(root, 1024); + + p1 = talloc_size(pool, 4 * 16); + torture_assert("pool allocate 4 * 16", p1 != NULL, "failed "); + memset(p1, 0x11, talloc_get_size(p1)); + p2 = talloc_size(pool, 4 * 16); + torture_assert("pool allocate 4 * 16", p2 > p1, "failed: !(p2 > p1) "); + memset(p2, 0x11, talloc_get_size(p2)); + + ofs1 = 
PTR_DIFF(p2, p1); + hdr = ofs1 - talloc_get_size(p1); + + talloc_steal(root, p1); + talloc_steal(root, p2); + + talloc_free(pool); + + p1_2 = p1; + + p1_2 = talloc_realloc_size(root, p1, 5 * 16); + torture_assert("pool realloc 5 * 16", p1_2 > p2, "failed: pointer not changed"); + memset(p1_2, 0x11, talloc_get_size(p1_2)); + ofs1 = PTR_DIFF(p1_2, p2); + ofs2 = talloc_get_size(p2) + hdr; + + torture_assert("pool realloc ", ofs1 == ofs2, "failed: pointer offset unexpected"); + + p2_2 = talloc_realloc_size(root, p2, 3 * 16); + torture_assert("pool realloc 5 * 16", p2_2 == p2, "failed: pointer changed"); + memset(p2_2, 0x11, talloc_get_size(p2_2)); + + talloc_free(p1_2); + + p2_2 = p2; + + /* now we should reclaim the full pool */ + p2_2 = talloc_realloc_size(root, p2, 8 * 16); + torture_assert("pool realloc 8 * 16", p2_2 == p1, "failed: pointer not expected"); + p2 = p2_2; + memset(p2_2, 0x11, talloc_get_size(p2_2)); + + /* now we malloc and free the full pool space */ + p2_2 = talloc_realloc_size(root, p2, 2 * 1024); + torture_assert("pool realloc 2 * 1024", p2_2 != p1, "failed: pointer not expected"); + memset(p2_2, 0x11, talloc_get_size(p2_2)); + + talloc_free(p2_2); + + talloc_free(root); + + return true; +} + +static bool test_pool_nest(void) +{ + void *p1, *p2, *p3; + void *e = talloc_new(NULL); + + p1 = talloc_pool(NULL, 1024); + torture_assert("talloc_pool", p1 != NULL, "failed"); + + p2 = talloc_pool(p1, 500); + torture_assert("talloc_pool", p2 != NULL, "failed"); + + p3 = talloc_size(p2, 10); + + talloc_steal(e, p3); + + talloc_free(p2); + + talloc_free(p3); + + talloc_free(p1); + + talloc_free(e); /* make ASAN happy */ + + return true; +} + +struct pooled { + char *s1; + char *s2; + char *s3; +}; + +static bool test_pooled_object(void) +{ + struct pooled *p; + const char *s1 = "hello"; + const char *s2 = "world"; + const char *s3 = ""; + + p = talloc_pooled_object(NULL, struct pooled, 3, + strlen(s1)+strlen(s2)+strlen(s3)+3); + + if (talloc_get_size(p) != sizeof(struct pooled)) { + return false; + } + + p->s1 = talloc_strdup(p, s1); + + TALLOC_FREE(p->s1); + p->s1 = talloc_strdup(p, s2); + TALLOC_FREE(p->s1); + + p->s1 = talloc_strdup(p, s1); + p->s2 = talloc_strdup(p, s2); + p->s3 = talloc_strdup(p, s3); + + TALLOC_FREE(p); + return true; +} + +static bool test_free_ref_null_context(void) +{ + void *p1, *p2, *p3; + int ret; + + talloc_disable_null_tracking(); + p1 = talloc_new(NULL); + p2 = talloc_new(NULL); + + p3 = talloc_reference(p2, p1); + torture_assert("reference", p3 == p1, "failed: reference on null"); + + ret = talloc_free(p1); + torture_assert("ref free with null parent", ret == 0, "failed: free with null parent"); + talloc_free(p2); + + talloc_enable_null_tracking_no_autofree(); + p1 = talloc_new(NULL); + p2 = talloc_new(NULL); + + p3 = talloc_reference(p2, p1); + torture_assert("reference", p3 == p1, "failed: reference on null"); + + ret = talloc_free(p1); + torture_assert("ref free with null tracked parent", ret == 0, "failed: free with null parent"); + talloc_free(p2); + + return true; +} + +static bool test_rusty(void) +{ + void *root; + char *p1; + + talloc_enable_null_tracking(); + root = talloc_new(NULL); + p1 = talloc_strdup(root, "foo"); + talloc_increase_ref_count(p1); + talloc_report_full(root, stdout); + talloc_free(root); + CHECK_BLOCKS("null_context", NULL, 2); + talloc_free(p1); /* make ASAN happy */ + + return true; +} + +static bool test_free_children(void) +{ + void *root; + char *p1, *p2; + const char *name, *name2; + + talloc_enable_null_tracking(); 
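+	/*
+	 * With null tracking enabled, allocations parented to NULL hang off
+	 * a hidden null context, so leak reports and block counts on the
+	 * NULL context can account for them.
+	 */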
+ root = talloc_new(NULL); + p1 = talloc_strdup(root, "foo1"); + p2 = talloc_strdup(p1, "foo2"); + (void)p2; + + talloc_set_name(p1, "%s", "testname"); + talloc_free_children(p1); + /* check its still a valid talloc ptr */ + talloc_get_size(talloc_get_name(p1)); + if (strcmp(talloc_get_name(p1), "testname") != 0) { + return false; + } + + talloc_set_name(p1, "%s", "testname"); + name = talloc_get_name(p1); + talloc_free_children(p1); + /* check its still a valid talloc ptr */ + talloc_get_size(talloc_get_name(p1)); + torture_assert("name", name == talloc_get_name(p1), "name ptr changed"); + torture_assert("namecheck", strcmp(talloc_get_name(p1), "testname") == 0, + "wrong name"); + CHECK_BLOCKS("name1", p1, 2); + + /* note that this does not free the old child name */ + talloc_set_name_const(p1, "testname2"); + name2 = talloc_get_name(p1); + /* but this does */ + talloc_free_children(p1); + (void)name2; + torture_assert("namecheck", strcmp(talloc_get_name(p1), "testname2") == 0, + "wrong name"); + CHECK_BLOCKS("name1", p1, 1); + + talloc_report_full(root, stdout); + talloc_free(root); + return true; +} + +static bool test_memlimit(void) +{ + void *root; + char *l1, *l2, *l3, *l4, *l5, *t; + char *pool; + int i; + + printf("test: memlimit\n# MEMORY LIMITS\n"); + + printf("==== talloc_new(NULL)\n"); + root = talloc_new(NULL); + + talloc_report_full(root, stdout); + + printf("==== talloc_size(root, 2048)\n"); + l1 = talloc_size(root, 2048); + torture_assert("memlimit", l1 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_free(l1)\n"); + talloc_free(l1); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(root, level 1)\n"); + l1 = talloc_strdup(root, "level 1"); + torture_assert("memlimit", l1 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_set_memlimit(l1, 2048)\n"); + torture_assert("memlimit", talloc_set_memlimit(l1, 2048) == 0, + "failed: setting memlimit should never fail\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_size(root, 2048)\n"); + l2 = talloc_size(l1, 2048); + torture_assert("memlimit", l2 == NULL, + "failed: alloc should fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(l1, level 2)\n"); + l2 = talloc_strdup(l1, "level 2"); + torture_assert("memlimit", l2 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_free(l2)\n"); + talloc_free(l2); + + talloc_report_full(root, stdout); + + printf("==== talloc_size(NULL, 2048)\n"); + l2 = talloc_size(NULL, 2048); + + talloc_report_full(root, stdout); + + printf("==== talloc_steal(l1, l2)\n"); + talloc_steal(l1, l2); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(l2, level 3)\n"); + l3 = talloc_strdup(l2, "level 3"); + torture_assert("memlimit", l3 == NULL, + "failed: alloc should fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_free(l2)\n"); + talloc_free(l2); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(NULL, level 2)\n"); + l2 = talloc_strdup(NULL, "level 2"); + talloc_steal(l1, l2); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(l2, level 3)\n"); + l3 = talloc_strdup(l2, "level 3"); + torture_assert("memlimit", l3 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, 
stdout); + + printf("==== talloc_set_memlimit(l3, 1024)\n"); + torture_assert("memlimit", talloc_set_memlimit(l3, 1024) == 0, + "failed: setting memlimit should never fail\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(l3, level 4)\n"); + l4 = talloc_strdup(l3, "level 4"); + torture_assert("memlimit", l4 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_set_memlimit(l4, 512)\n"); + torture_assert("memlimit", talloc_set_memlimit(l4, 512) == 0, + "failed: setting memlimit should never fail\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(l4, level 5)\n"); + l5 = talloc_strdup(l4, "level 5"); + torture_assert("memlimit", l5 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_realloc(NULL, l5, char, 600)\n"); + t = talloc_realloc(NULL, l5, char, 600); + torture_assert("memlimit", t == NULL, + "failed: alloc should fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_realloc(NULL, l5, char, 5)\n"); + l5 = talloc_realloc(NULL, l5, char, 5); + torture_assert("memlimit", l5 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(l3, level 4)\n"); + l4 = talloc_strdup(l3, "level 4"); + torture_assert("memlimit", l4 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_set_memlimit(l4, 512)\n"); + torture_assert("memlimit", talloc_set_memlimit(l4, 512) == 0, + "failed: setting memlimit should never fail\n"); + + talloc_report_full(root, stdout); + + printf("==== talloc_strdup(l4, level 5)\n"); + l5 = talloc_strdup(l4, "level 5"); + torture_assert("memlimit", l5 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + + printf("==== Make new temp context and steal l5\n"); + t = talloc_new(root); + talloc_steal(t, l5); + + talloc_report_full(root, stdout); + + printf("==== talloc_size(t, 2048)\n"); + l1 = talloc_size(t, 2048); + torture_assert("memlimit", l1 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(root, stdout); + talloc_free(root); + + /* Test memlimits with pools. */ + printf("==== talloc_pool(NULL, 10*1024)\n"); + pool = talloc_pool(NULL, 10*1024); + torture_assert("memlimit", pool != NULL, + "failed: alloc should not fail due to memory limit\n"); + + printf("==== talloc_set_memlimit(pool, 10*1024)\n"); + talloc_set_memlimit(pool, 10*1024); + for (i = 0; i < 9; i++) { + printf("==== talloc_size(pool, 1024) %i/10\n", i + 1); + l1 = talloc_size(pool, 1024); + torture_assert("memlimit", l1 != NULL, + "failed: alloc should not fail due to memory limit\n"); + talloc_report_full(pool, stdout); + } + /* The next alloc should fail. */ + printf("==== talloc_size(pool, 1024) 10/10\n"); + l2 = talloc_size(pool, 1024); + torture_assert("memlimit", l2 == NULL, + "failed: alloc should fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + /* Moving one of the children shouldn't change the limit, + as it's still inside the pool. 
*/ + + printf("==== talloc_new(NULL)\n"); + root = talloc_new(NULL); + + printf("==== talloc_steal(root, l1)\n"); + talloc_steal(root, l1); + + printf("==== talloc_size(pool, 1024)\n"); + l2 = talloc_size(pool, 1024); + torture_assert("memlimit", l2 == NULL, + "failed: alloc should fail due to memory limit\n"); + + printf("==== talloc_free_children(pool)\n"); + talloc_free(l1); + talloc_free_children(pool); + + printf("==== talloc_size(pool, 1024)\n"); + l1 = talloc_size(pool, 1024); + + /* try reallocs of increasing size */ + for (i = 1; i < 9; i++) { + printf("==== talloc_realloc_size(NULL, l1, %i*1024) %i/10\n", i, i + 1); + l1 = talloc_realloc_size(NULL, l1, i*1024); + torture_assert("memlimit", l1 != NULL, + "failed: realloc should not fail due to memory limit\n"); + talloc_report_full(pool, stdout); + } + /* The next alloc should fail. */ + printf("==== talloc_realloc_size(NULL, l1, 10*1024) 10/10\n"); + l2 = talloc_realloc_size(NULL, l1, 10*1024); + torture_assert("memlimit", l2 == NULL, + "failed: realloc should fail due to memory limit\n"); + + /* Increase the memlimit */ + printf("==== talloc_set_memlimit(pool, 11*1024)\n"); + talloc_set_memlimit(pool, 11*1024); + + /* The final realloc should still fail + as the entire realloced chunk needs to be moved out of the pool */ + printf("==== talloc_realloc_size(NULL, l1, 10*1024) 10/10\n"); + l2 = talloc_realloc_size(NULL, l1, 10*1024); + torture_assert("memlimit", l2 == NULL, + "failed: realloc should fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + printf("==== talloc_set_memlimit(pool, 21*1024)\n"); + talloc_set_memlimit(pool, 21*1024); + + /* There's now sufficient space to move the chunk out of the pool */ + printf("==== talloc_realloc_size(NULL, l1, 10*1024) 10/10\n"); + l2 = talloc_realloc_size(NULL, l1, 10*1024); + torture_assert("memlimit", l2 != NULL, + "failed: realloc should not fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + /* ...which should mean smaller allocations can now occur within the pool */ + printf("==== talloc_size(pool, 9*1024)\n"); + l1 = talloc_size(pool, 9*1024); + torture_assert("memlimit", l1 != NULL, + "failed: new allocations should be allowed in the pool\n"); + + talloc_report_full(pool, stdout); + + /* But reallocs bigger than the pool will still fail */ + printf("==== talloc_realloc_size(NULL, l1, 10*1024)\n"); + l2 = talloc_realloc_size(NULL, l1, 10*1024); + torture_assert("memlimit", l2 == NULL, + "failed: realloc should fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + /* ..as well as allocs */ + printf("==== talloc_size(pool, 1024)\n"); + l1 = talloc_size(pool, 1024); + torture_assert("memlimit", l1 == NULL, + "failed: alloc should fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + printf("==== talloc_free_children(pool)\n"); + talloc_free_children(pool); + + printf("==== talloc_set_memlimit(pool, 1024)\n"); + talloc_set_memlimit(pool, 1024); + + /* We should still be able to allocate up to the pool limit + because the memlimit only applies to new heap allocations */ + printf("==== talloc_size(pool, 9*1024)\n"); + l1 = talloc_size(pool, 9*1024); + torture_assert("memlimit", l1 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + l1 = talloc_size(pool, 1024); + torture_assert("memlimit", l1 == NULL, + "failed: alloc should fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + printf("==== talloc_free_children(pool)\n"); + 
talloc_free_children(pool); + + printf("==== talloc_set_memlimit(pool, 10*1024)\n"); + talloc_set_memlimit(pool, 10*1024); + + printf("==== talloc_size(pool, 1024)\n"); + l1 = talloc_size(pool, 1024); + torture_assert("memlimit", l1 != NULL, + "failed: alloc should not fail due to memory limit\n"); + + talloc_report_full(pool, stdout); + + talloc_free(pool); + talloc_free(root); + printf("success: memlimit\n"); + + return true; +} + +#ifdef HAVE_PTHREAD + +#define NUM_THREADS 100 + +/* Sync variables. */ +static pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER; +static pthread_cond_t condvar = PTHREAD_COND_INITIALIZER; +static void *intermediate_ptr; + +/* Subthread. */ +static void *thread_fn(void *arg) +{ + int ret; + const char *ctx_name = (const char *)arg; + void *sub_ctx = NULL; + /* + * Do stuff that creates a new talloc hierarchy in + * this thread. + */ + void *top_ctx = talloc_named_const(NULL, 0, "top"); + if (top_ctx == NULL) { + return NULL; + } + sub_ctx = talloc_named_const(top_ctx, 100, ctx_name); + if (sub_ctx == NULL) { + return NULL; + } + + /* + * Now transfer a pointer from our hierarchy + * onto the intermediate ptr. + */ + ret = pthread_mutex_lock(&mtx); + if (ret != 0) { + talloc_free(top_ctx); + return NULL; + } + /* Wait for intermediate_ptr to be free. */ + while (intermediate_ptr != NULL) { + ret = pthread_cond_wait(&condvar, &mtx); + if (ret != 0) { + talloc_free(top_ctx); + ret = pthread_mutex_unlock(&mtx); + assert(ret == 0); + return NULL; + } + } + + /* and move our memory onto it from our toplevel hierarchy. */ + intermediate_ptr = talloc_move(NULL, &sub_ctx); + + /* Tell the main thread it's ready for pickup. */ + pthread_cond_broadcast(&condvar); + ret = pthread_mutex_unlock(&mtx); + assert(ret == 0); + + talloc_free(top_ctx); + return NULL; +} + +/* Main thread. */ +static bool test_pthread_talloc_passing(void) +{ + int i; + int ret; + char str_array[NUM_THREADS][20]; + pthread_t thread_id; + void *mem_ctx; + + /* + * Important ! Null tracking breaks threaded talloc. + * It *must* be turned off. + */ + talloc_disable_null_tracking(); + + printf("test: pthread_talloc_passing\n# PTHREAD TALLOC PASSING\n"); + + /* Main thread toplevel context. */ + mem_ctx = talloc_named_const(NULL, 0, "toplevel"); + if (mem_ctx == NULL) { + printf("failed to create toplevel context\n"); + return false; + } + + /* + * Spin off NUM_THREADS threads. + * They will use their own toplevel contexts. + */ + for (i = 0; i < NUM_THREADS; i++) { + ret = snprintf(str_array[i], + 20, + "thread:%d", + i); + if (ret < 0) { + printf("snprintf %d failed\n", i); + return false; + } + ret = pthread_create(&thread_id, + NULL, + thread_fn, + str_array[i]); + if (ret != 0) { + printf("failed to create thread %d (%d)\n", i, ret); + return false; + } + } + + printf("Created %d threads\n", NUM_THREADS); + + /* Now wait for NUM_THREADS transfers of the talloc'ed memory. */ + for (i = 0; i < NUM_THREADS; i++) { + ret = pthread_mutex_lock(&mtx); + if (ret != 0) { + printf("pthread_mutex_lock %d failed (%d)\n", i, ret); + talloc_free(mem_ctx); + return false; + } + + /* Wait for intermediate_ptr to have our data. */ + while (intermediate_ptr == NULL) { + ret = pthread_cond_wait(&condvar, &mtx); + if (ret != 0) { + printf("pthread_cond_wait %d failed (%d)\n", i, + ret); + talloc_free(mem_ctx); + ret = pthread_mutex_unlock(&mtx); + assert(ret == 0); + } + } + + /* and move it onto our toplevel hierarchy. 
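This completes the single-slot handoff started in thread_fn() above; condensed,
with error handling elided (a sketch, not extra test code):

    pthread_mutex_lock(&mtx);
    while (intermediate_ptr == NULL)
        pthread_cond_wait(&condvar, &mtx);
    p = talloc_move(mem_ctx, &intermediate_ptr); // take ownership; source reset to NULL
    pthread_cond_broadcast(&condvar);            // a waiting producer may refill the slot
    pthread_mutex_unlock(&mtx);

Because talloc_move() NULLs its source pointer, intermediate_ptr doubles as the
condition variable's predicate.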
*/ + (void)talloc_move(mem_ctx, &intermediate_ptr); + + /* Tell the sub-threads we're ready for another. */ + pthread_cond_broadcast(&condvar); + ret = pthread_mutex_unlock(&mtx); + assert(ret == 0); + } + + CHECK_SIZE("pthread_talloc_passing", mem_ctx, NUM_THREADS * 100); +#if 1 + /* Dump the hierarchy. */ + talloc_report(mem_ctx, stdout); +#endif + talloc_free(mem_ctx); + printf("success: pthread_talloc_passing\n"); + return true; +} +#endif + +static void test_magic_protection_abort(const char *reason) +{ + /* exit with errcode 42 to communicate successful test to the parent process */ + if (strcmp(reason, "Bad talloc magic value - unknown value") == 0) { + _exit(42); + } else { + printf("talloc aborted for an unexpected reason\n"); + } +} + +static int test_magic_protection_destructor(int *ptr) +{ + _exit(404); /* Not 42 */ +} + +static bool test_magic_protection(void) +{ + void *pool = talloc_pool(NULL, 1024); + int *p1, *p2; + pid_t pid; + int exit_status; + + printf("test: magic_protection\n"); + p1 = talloc(pool, int); + p2 = talloc(pool, int); + + /* To avoid complaints from the compiler assign values to the p1 & p2. */ + *p1 = 6; + *p2 = 9; + + pid = fork(); + if (pid == 0) { + talloc_set_abort_fn(test_magic_protection_abort); + talloc_set_destructor(p2, test_magic_protection_destructor); + + /* + * Simulate a security attack + * by triggering a buffer overflow in memset to overwrite the + * constructor in the next pool chunk. + * + * Real attacks would attempt to set a real destructor. + */ + memset(p1, '\0', 32); + + /* Then the attack takes effect when the memory's freed. */ + talloc_free(pool); + + /* Never reached. Make compilers happy */ + return true; + } + + while (wait(&exit_status) != pid); + + talloc_free(pool); /* make ASAN happy */ + + if (!WIFEXITED(exit_status)) { + printf("Child exited through unexpected abnormal means\n"); + return false; + } + if (WEXITSTATUS(exit_status) != 42) { + printf("Child exited with wrong exit status\n"); + return false; + } + if (WIFSIGNALED(exit_status)) { + printf("Child received unexpected signal\n"); + return false; + } + + printf("success: magic_protection\n"); + return true; +} + +static void test_magic_free_protection_abort(const char *reason) +{ + /* exit with errcode 42 to communicate successful test to the parent process */ + if (strcmp(reason, "Bad talloc magic value - access after free") == 0) { + _exit(42); + } + /* not 42 */ + _exit(404); +} + +static bool test_magic_free_protection(void) +{ + void *pool = talloc_pool(NULL, 1024); + int *p1, *p2, *p3; + pid_t pid; + int exit_status; + + printf("test: magic_free_protection\n"); + p1 = talloc(pool, int); + p2 = talloc(pool, int); + + /* To avoid complaints from the compiler assign values to the p1 & p2. */ + *p1 = 6; + *p2 = 9; + + p3 = talloc_realloc(pool, p2, int, 2048); + torture_assert("pool realloc 2048", + p3 != p2, + "failed: pointer not changed"); + + /* + * Now access the memory in the pool after the realloc(). It + * should be marked as free, so use of the old pointer should + * trigger the abort function + */ + pid = fork(); + if (pid == 0) { + talloc_set_abort_fn(test_magic_free_protection_abort); + + talloc_get_name(p2); + + /* Never reached. 
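The bad-magic check fires inside talloc_get_name() above, and
test_magic_free_protection_abort() converts it into _exit(42); the parent then
verifies the outcome, roughly (a condensed sketch of the checks below):

    while (wait(&exit_status) != pid);
    // pass only if WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == 42

The return statement that follows is only here to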
Make compilers happy */ + return true; + } + + while (wait(&exit_status) != pid); + + if (!WIFEXITED(exit_status)) { + printf("Child exited through unexpected abnormal means\n"); + return false; + } + if (WEXITSTATUS(exit_status) != 42) { + printf("Child exited with wrong exit status\n"); + return false; + } + if (WIFSIGNALED(exit_status)) { + printf("Child received unexpected signal\n"); + return false; + } + + talloc_free(pool); + + printf("success: magic_free_protection\n"); + return true; +} + +static void test_reset(void) +{ + talloc_set_log_fn(test_log_stdout); + test_abort_stop(); + talloc_disable_null_tracking(); + talloc_enable_null_tracking_no_autofree(); +} + +bool torture_local_talloc(struct torture_context *tctx) +{ + bool ret = true; + + setlinebuf(stdout); + + test_reset(); + ret &= test_pooled_object(); + test_reset(); + ret &= test_pool_nest(); + test_reset(); + ret &= test_ref1(); + test_reset(); + ret &= test_ref2(); + test_reset(); + ret &= test_ref3(); + test_reset(); + ret &= test_ref4(); + test_reset(); + ret &= test_unlink1(); + test_reset(); + ret &= test_misc(); + test_reset(); + ret &= test_realloc(); + test_reset(); + ret &= test_realloc_child(); + test_reset(); + ret &= test_steal(); + test_reset(); + ret &= test_move(); + test_reset(); + ret &= test_unref_reparent(); + test_reset(); + ret &= test_realloc_fn(); + test_reset(); + ret &= test_type(); + test_reset(); + ret &= test_lifeless(); + test_reset(); + ret &= test_loop(); + test_reset(); + ret &= test_free_parent_deny_child(); + test_reset(); + ret &= test_realloc_on_destructor_parent(); + test_reset(); + ret &= test_free_parent_reparent_child(); + test_reset(); + ret &= test_free_parent_reparent_child_in_pool(); + test_reset(); + ret &= test_talloc_ptrtype(); + test_reset(); + ret &= test_talloc_free_in_destructor(); + test_reset(); + ret &= test_pool(); + test_reset(); + ret &= test_pool_steal(); + test_reset(); + ret &= test_free_ref_null_context(); + test_reset(); + ret &= test_rusty(); + test_reset(); + ret &= test_free_children(); + test_reset(); + ret &= test_memlimit(); +#ifdef HAVE_PTHREAD + test_reset(); + ret &= test_pthread_talloc_passing(); +#endif + + + if (ret) { + test_reset(); + ret &= test_speed(); + } + test_reset(); + ret &= test_autofree(); + test_reset(); + ret &= test_magic_protection(); + test_reset(); + ret &= test_magic_free_protection(); + + test_reset(); + talloc_disable_null_tracking(); + return ret; +} diff --git a/testsuite_main.c b/testsuite_main.c new file mode 100644 index 0000000..50ce0f8 --- /dev/null +++ b/testsuite_main.c @@ -0,0 +1,36 @@ +/* + Unix SMB/CIFS implementation. + + local testing of talloc routines. + + Copyright (C) Andrew Tridgell 2004 + + ** NOTE! The following LGPL license applies to the talloc + ** library. This does NOT imply that all of Samba is released + ** under the LGPL + + This library is free software; you can redistribute it and/or + modify it under the terms of the GNU Lesser General Public + License as published by the Free Software Foundation; either + version 3 of the License, or (at your option) any later version. + + This library is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Lesser General Public License for more details. + + You should have received a copy of the GNU Lesser General Public + License along with this library; if not, see <http://www.gnu.org/licenses/>. 
+*/ + +#include "replace.h" + +#include "talloc_testsuite.h" + +int main(void) +{ + bool ret = torture_local_talloc(NULL); + if (!ret) + return -1; + return 0; +} diff --git a/third_party/waf/update.sh b/third_party/waf/update.sh new file mode 100755 index 0000000..45fbeec --- /dev/null +++ b/third_party/waf/update.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +if [[ $# -lt 1 ]]; then + echo "Usage: update.sh VERSION" + exit 1 +fi + +WAF_VERSION="${1}" +WAF_GIT="https://gitlab.com/ita1024/waf.git" +WAF_UPDATE_SCRIPT="$(readlink -f "$0")" +WAF_SAMBA_DIR="$(dirname "${WAF_UPDATE_SCRIPT}")" +WAF_TMPDIR=$(mktemp --tmpdir -d waf-XXXXXXXX) + +echo "VERSION: ${WAF_VERSION}" +echo "GIT URL: ${WAF_GIT}" +echo "WAF SAMBA DIR: ${WAF_SAMBA_DIR}" +echo "WAF TMP DIR: ${WAF_TMPDIR}" + +cleanup_tmpdir() +{ + popd 2>/dev/null || true + rm -rf "$WAF_TMPDIR" +} +trap cleanup_tmpdir SIGINT + +cleanup_and_exit() +{ + cleanup_tmpdir + if test "$1" = 0 -o -z "$1"; then + exit 0 + else + exit "$1" + fi +} + +# Checkout the git tree +mkdir -p "${WAF_TMPDIR}" +pushd "${WAF_TMPDIR}" || cleanup_and_exit 1 + +git clone "${WAF_GIT}" +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: Failed to clone repository" + cleanup_and_exit 1 +fi + +pushd waf || cleanup_and_exit 1 +git checkout -b "waf-${WAF_VERSION}" "waf-${WAF_VERSION}" +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: Failed to checkout waf-${WAF_VERSION} repository" + cleanup_and_exit 1 +fi +popd || cleanup_and_exit 1 + +popd || cleanup_and_exit 1 + +# Update waflib +pushd "${WAF_SAMBA_DIR}" || cleanup_and_exit 1 +pwd + +rm -rf waflib/ +rsync -av "${WAF_TMPDIR}/waf/waflib" . +ret=$? +if [ $ret -ne 0 ]; then + echo "ERROR: Failed copy waflib" + cleanup_and_exit 1 +fi +chmod -x waflib/Context.py + +git add waflib + +popd || cleanup_and_exit 1 + +echo +echo "Now please change VERSION in buildtools/bin/waf and" +echo "Context.HEXVERSION in buildtools/wafsamba/wafsamba.py" +grep WAFVERSION "${WAF_SAMBA_DIR}/waflib/Context.py" +grep HEXVERSION "${WAF_SAMBA_DIR}/waflib/Context.py" +echo + +cleanup_and_exit 0 diff --git a/third_party/waf/waflib/Build.py b/third_party/waf/waflib/Build.py new file mode 100644 index 0000000..b49dd83 --- /dev/null +++ b/third_party/waf/waflib/Build.py @@ -0,0 +1,1514 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Classes related to the build phase (build, clean, install, step, etc) + +The inheritance tree is the following: + +""" + +import os, sys, errno, re, shutil, stat +try: + import cPickle +except ImportError: + import pickle as cPickle +from waflib import Node, Runner, TaskGen, Utils, ConfigSet, Task, Logs, Options, Context, Errors + +CACHE_DIR = 'c4che' +"""Name of the cache directory""" + +CACHE_SUFFIX = '_cache.py' +"""ConfigSet cache files for variants are written under :py:attr:´waflib.Build.CACHE_DIR´ in the form ´variant_name´_cache.py""" + +INSTALL = 1337 +"""Positive value '->' install, see :py:attr:`waflib.Build.BuildContext.is_install`""" + +UNINSTALL = -1337 +"""Negative value '<-' uninstall, see :py:attr:`waflib.Build.BuildContext.is_install`""" + +SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split() +"""Build class members to save between the runs; these should be all dicts +except for `root` which represents a :py:class:`waflib.Node.Node` instance +""" + +CFG_FILES = 'cfg_files' +"""Files from the build directory to hash before starting the build (``config.h`` written during the configuration)""" + +POST_AT_ONCE = 0 +"""Post mode: all task generators are posted before any 
task executed""" + +POST_LAZY = 1 +"""Post mode: post the task generators group after group, the tasks in the next group are created when the tasks in the previous groups are done""" + +PROTOCOL = -1 +if sys.platform == 'cli': + PROTOCOL = 0 + +class BuildContext(Context.Context): + '''executes the build''' + + cmd = 'build' + variant = '' + + def __init__(self, **kw): + super(BuildContext, self).__init__(**kw) + + self.is_install = 0 + """Non-zero value when installing or uninstalling file""" + + self.top_dir = kw.get('top_dir', Context.top_dir) + """See :py:attr:`waflib.Context.top_dir`; prefer :py:attr:`waflib.Build.BuildContext.srcnode`""" + + self.out_dir = kw.get('out_dir', Context.out_dir) + """See :py:attr:`waflib.Context.out_dir`; prefer :py:attr:`waflib.Build.BuildContext.bldnode`""" + + self.run_dir = kw.get('run_dir', Context.run_dir) + """See :py:attr:`waflib.Context.run_dir`""" + + self.launch_dir = Context.launch_dir + """See :py:attr:`waflib.Context.out_dir`; prefer :py:meth:`waflib.Build.BuildContext.launch_node`""" + + self.post_mode = POST_LAZY + """Whether to post the task generators at once or group-by-group (default is group-by-group)""" + + self.cache_dir = kw.get('cache_dir') + if not self.cache_dir: + self.cache_dir = os.path.join(self.out_dir, CACHE_DIR) + + self.all_envs = {} + """Map names to :py:class:`waflib.ConfigSet.ConfigSet`, the empty string must map to the default environment""" + + # ======================================= # + # cache variables + + self.node_sigs = {} + """Dict mapping build nodes to task identifier (uid), it indicates whether a task created a particular file (persists across builds)""" + + self.task_sigs = {} + """Dict mapping task identifiers (uid) to task signatures (persists across builds)""" + + self.imp_sigs = {} + """Dict mapping task identifiers (uid) to implicit task dependencies used for scanning targets (persists across builds)""" + + self.node_deps = {} + """Dict mapping task identifiers (uid) to node dependencies found by :py:meth:`waflib.Task.Task.scan` (persists across builds)""" + + self.raw_deps = {} + """Dict mapping task identifiers (uid) to custom data returned by :py:meth:`waflib.Task.Task.scan` (persists across builds)""" + + self.task_gen_cache_names = {} + + self.jobs = Options.options.jobs + """Amount of jobs to run in parallel""" + + self.targets = Options.options.targets + """List of targets to build (default: \\*)""" + + self.keep = Options.options.keep + """Whether the build should continue past errors""" + + self.progress_bar = Options.options.progress_bar + """ + Level of progress status: + + 0. normal output + 1. progress bar + 2. IDE output + 3. No output at all + """ + + # Manual dependencies. + self.deps_man = Utils.defaultdict(list) + """Manual dependencies set by :py:meth:`waflib.Build.BuildContext.add_manual_dependency`""" + + # just the structure here + self.current_group = 0 + """ + Current build group + """ + + self.groups = [] + """ + List containing lists of task generators + """ + + self.group_names = {} + """ + Map group names to the group lists. 
See :py:meth:`waflib.Build.BuildContext.add_group` + """ + + for v in SAVED_ATTRS: + if not hasattr(self, v): + setattr(self, v, {}) + + def get_variant_dir(self): + """Getter for the variant_dir attribute""" + if not self.variant: + return self.out_dir + return os.path.join(self.out_dir, os.path.normpath(self.variant)) + variant_dir = property(get_variant_dir, None) + + def __call__(self, *k, **kw): + """ + Create a task generator and add it to the current build group. The following forms are equivalent:: + + def build(bld): + tg = bld(a=1, b=2) + + def build(bld): + tg = bld() + tg.a = 1 + tg.b = 2 + + def build(bld): + tg = TaskGen.task_gen(a=1, b=2) + bld.add_to_group(tg, None) + + :param group: group name to add the task generator to + :type group: string + """ + kw['bld'] = self + ret = TaskGen.task_gen(*k, **kw) + self.task_gen_cache_names = {} # reset the cache, each time + self.add_to_group(ret, group=kw.get('group')) + return ret + + def __copy__(self): + """ + Build contexts cannot be copied + + :raises: :py:class:`waflib.Errors.WafError` + """ + raise Errors.WafError('build contexts cannot be copied') + + def load_envs(self): + """ + The configuration command creates files of the form ``build/c4che/NAME_cache.py``. This method + creates a :py:class:`waflib.ConfigSet.ConfigSet` instance for each ``NAME`` by reading those + files and stores them in :py:attr:`waflib.Build.BuildContext.all_envs`. + """ + node = self.root.find_node(self.cache_dir) + if not node: + raise Errors.WafError('The project was not configured: run "waf configure" first!') + lst = node.ant_glob('**/*%s' % CACHE_SUFFIX, quiet=True) + + if not lst: + raise Errors.WafError('The cache directory is empty: reconfigure the project') + + for x in lst: + name = x.path_from(node).replace(CACHE_SUFFIX, '').replace('\\', '/') + env = ConfigSet.ConfigSet(x.abspath()) + self.all_envs[name] = env + for f in env[CFG_FILES]: + newnode = self.root.find_resource(f) + if not newnode or not newnode.exists(): + raise Errors.WafError('Missing configuration file %r, reconfigure the project!' % f) + + def init_dirs(self): + """ + Initialize the project directory and the build directory by creating the nodes + :py:attr:`waflib.Build.BuildContext.srcnode` and :py:attr:`waflib.Build.BuildContext.bldnode` + corresponding to ``top_dir`` and ``variant_dir`` respectively. The ``bldnode`` directory is + created if necessary. + """ + if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): + raise Errors.WafError('The project was not configured: run "waf configure" first!') + + self.path = self.srcnode = self.root.find_dir(self.top_dir) + self.bldnode = self.root.make_node(self.variant_dir) + self.bldnode.mkdir() + + def execute(self): + """ + Restore data from previous builds and call :py:meth:`waflib.Build.BuildContext.execute_build`. 
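In effect (a condensed sketch of the statements in the method body below)::

    self.restore()         # reload SAVED_ATTRS from the build cache
    if not self.all_envs:
        self.load_envs()   # read the c4che/*_cache.py ConfigSets
    self.execute_build()
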
+ Overrides from :py:func:`waflib.Context.Context.execute` + """ + self.restore() + if not self.all_envs: + self.load_envs() + self.execute_build() + + def execute_build(self): + """ + Execute the build by: + + * reading the scripts (see :py:meth:`waflib.Context.Context.recurse`) + * calling :py:meth:`waflib.Build.BuildContext.pre_build` to call user build functions + * calling :py:meth:`waflib.Build.BuildContext.compile` to process the tasks + * calling :py:meth:`waflib.Build.BuildContext.post_build` to call user build functions + """ + + Logs.info("Waf: Entering directory `%s'", self.variant_dir) + self.recurse([self.run_dir]) + self.pre_build() + + # display the time elapsed in the progress bar + self.timer = Utils.Timer() + + try: + self.compile() + finally: + if self.progress_bar == 1 and sys.stderr.isatty(): + c = self.producer.processed or 1 + m = self.progress_line(c, c, Logs.colors.BLUE, Logs.colors.NORMAL) + Logs.info(m, extra={'stream': sys.stderr, 'c1': Logs.colors.cursor_off, 'c2' : Logs.colors.cursor_on}) + Logs.info("Waf: Leaving directory `%s'", self.variant_dir) + try: + self.producer.bld = None + del self.producer + except AttributeError: + pass + self.post_build() + + def restore(self): + """ + Load data from a previous run, sets the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS` + """ + try: + env = ConfigSet.ConfigSet(os.path.join(self.cache_dir, 'build.config.py')) + except EnvironmentError: + pass + else: + if env.version < Context.HEXVERSION: + raise Errors.WafError('Project was configured with a different version of Waf, please reconfigure it') + + for t in env.tools: + self.setup(**t) + + dbfn = os.path.join(self.variant_dir, Context.DBFILE) + try: + data = Utils.readf(dbfn, 'rb') + except (EnvironmentError, EOFError): + # handle missing file/empty file + Logs.debug('build: Could not load the build cache %s (missing)', dbfn) + else: + try: + Node.pickle_lock.acquire() + Node.Nod3 = self.node_class + try: + data = cPickle.loads(data) + except Exception as e: + Logs.debug('build: Could not pickle the build cache %s: %r', dbfn, e) + else: + for x in SAVED_ATTRS: + setattr(self, x, data.get(x, {})) + finally: + Node.pickle_lock.release() + + self.init_dirs() + + def store(self): + """ + Store data for next runs, set the attributes listed in :py:const:`waflib.Build.SAVED_ATTRS`. Uses a temporary + file to avoid problems on ctrl+c. + """ + data = {} + for x in SAVED_ATTRS: + data[x] = getattr(self, x) + db = os.path.join(self.variant_dir, Context.DBFILE) + + try: + Node.pickle_lock.acquire() + Node.Nod3 = self.node_class + x = cPickle.dumps(data, PROTOCOL) + finally: + Node.pickle_lock.release() + + Utils.writef(db + '.tmp', x, m='wb') + + try: + st = os.stat(db) + os.remove(db) + if not Utils.is_win32: # win32 has no chown but we're paranoid + os.chown(db + '.tmp', st.st_uid, st.st_gid) + except (AttributeError, OSError): + pass + + # do not use shutil.move (copy is not thread-safe) + os.rename(db + '.tmp', db) + + def compile(self): + """ + Run the build by creating an instance of :py:class:`waflib.Runner.Parallel` + The cache file is written when at least a task was executed. 
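Persisting the cache reuses the write-then-rename pattern of :py:meth:`store`
above (sketch)::

    x = cPickle.dumps(data, PROTOCOL)
    Utils.writef(db + '.tmp', x, m='wb')
    os.rename(db + '.tmp', db)   # atomic on POSIX, so ctrl+c cannot leave a torn cache
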
+ + :raises: :py:class:`waflib.Errors.BuildError` in case the build fails + """ + Logs.debug('build: compile()') + + # delegate the producer-consumer logic to another object to reduce the complexity + self.producer = Runner.Parallel(self, self.jobs) + self.producer.biter = self.get_build_iterator() + try: + self.producer.start() + except KeyboardInterrupt: + if self.is_dirty(): + self.store() + raise + else: + if self.is_dirty(): + self.store() + + if self.producer.error: + raise Errors.BuildError(self.producer.error) + + def is_dirty(self): + return self.producer.dirty + + def setup(self, tool, tooldir=None, funs=None): + """ + Import waf tools defined during the configuration:: + + def configure(conf): + conf.load('glib2') + + def build(bld): + pass # glib2 is imported implicitly + + :param tool: tool list + :type tool: list + :param tooldir: optional tool directory (sys.path) + :type tooldir: list of string + :param funs: unused variable + """ + if isinstance(tool, list): + for i in tool: + self.setup(i, tooldir) + return + + module = Context.load_tool(tool, tooldir) + if hasattr(module, "setup"): + module.setup(self) + + def get_env(self): + """Getter for the env property""" + try: + return self.all_envs[self.variant] + except KeyError: + return self.all_envs[''] + def set_env(self, val): + """Setter for the env property""" + self.all_envs[self.variant] = val + + env = property(get_env, set_env) + + def add_manual_dependency(self, path, value): + """ + Adds a dependency from a node object to a value:: + + def build(bld): + bld.add_manual_dependency( + bld.path.find_resource('wscript'), + bld.root.find_resource('/etc/fstab')) + + :param path: file path + :type path: string or :py:class:`waflib.Node.Node` + :param value: value to depend + :type value: :py:class:`waflib.Node.Node`, byte object, or function returning a byte object + """ + if not path: + raise ValueError('Invalid input path %r' % path) + + if isinstance(path, Node.Node): + node = path + elif os.path.isabs(path): + node = self.root.find_resource(path) + else: + node = self.path.find_resource(path) + if not node: + raise ValueError('Could not find the path %r' % path) + + if isinstance(value, list): + self.deps_man[node].extend(value) + else: + self.deps_man[node].append(value) + + def launch_node(self): + """Returns the launch directory as a :py:class:`waflib.Node.Node` object (cached)""" + try: + # private cache + return self.p_ln + except AttributeError: + self.p_ln = self.root.find_dir(self.launch_dir) + return self.p_ln + + def hash_env_vars(self, env, vars_lst): + """ + Hashes configuration set variables:: + + def build(bld): + bld.hash_env_vars(bld.env, ['CXX', 'CC']) + + This method uses an internal cache. 
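The cache key combines the ConfigSet identity with the variable list, exactly as
computed below::

    idx = str(id(env)) + str(vars_lst)

so mutating the same ConfigSet object in place can yield a stale cached hash.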
+ + :param env: Configuration Set + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + :param vars_lst: list of variables + :type vars_lst: list of string + """ + + if not env.table: + env = env.parent + if not env: + return Utils.SIG_NIL + + idx = str(id(env)) + str(vars_lst) + try: + cache = self.cache_env + except AttributeError: + cache = self.cache_env = {} + else: + try: + return self.cache_env[idx] + except KeyError: + pass + + lst = [env[a] for a in vars_lst] + cache[idx] = ret = Utils.h_list(lst) + Logs.debug('envhash: %s %r', Utils.to_hex(ret), lst) + return ret + + def get_tgen_by_name(self, name): + """ + Fetches a task generator by its name or its target attribute; + the name must be unique in a build:: + + def build(bld): + tg = bld(name='foo') + tg == bld.get_tgen_by_name('foo') + + This method uses a private internal cache. + + :param name: Task generator name + :raises: :py:class:`waflib.Errors.WafError` in case there is no task generator by that name + """ + cache = self.task_gen_cache_names + if not cache: + # create the index lazily + for g in self.groups: + for tg in g: + try: + cache[tg.name] = tg + except AttributeError: + # raised if not a task generator, which should be uncommon + pass + try: + return cache[name] + except KeyError: + raise Errors.WafError('Could not find a task generator for the name %r' % name) + + def progress_line(self, idx, total, col1, col2): + """ + Computes a progress bar line displayed when running ``waf -p`` + + :returns: progress bar line + :rtype: string + """ + if not sys.stderr.isatty(): + return '' + + n = len(str(total)) + + Utils.rot_idx += 1 + ind = Utils.rot_chr[Utils.rot_idx % 4] + + pc = (100. * idx)/total + fs = "[%%%dd/%%d][%%s%%2d%%%%%%s][%s][" % (n, ind) + left = fs % (idx, total, col1, pc, col2) + right = '][%s%s%s]' % (col1, self.timer, col2) + + cols = Logs.get_term_cols() - len(left) - len(right) + 2*len(col1) + 2*len(col2) + if cols < 7: + cols = 7 + + ratio = ((cols * idx)//total) - 1 + + bar = ('='*ratio+'>').ljust(cols) + msg = Logs.indicator % (left, bar, right) + + return msg + + def declare_chain(self, *k, **kw): + """ + Wraps :py:func:`waflib.TaskGen.declare_chain` for convenience + """ + return TaskGen.declare_chain(*k, **kw) + + def pre_build(self): + """Executes user-defined methods before the build starts, see :py:meth:`waflib.Build.BuildContext.add_pre_fun`""" + for m in getattr(self, 'pre_funs', []): + m(self) + + def post_build(self): + """Executes user-defined methods after the build is successful, see :py:meth:`waflib.Build.BuildContext.add_post_fun`""" + for m in getattr(self, 'post_funs', []): + m(self) + + def add_pre_fun(self, meth): + """ + Binds a callback method to execute after the scripts are read and before the build starts:: + + def mycallback(bld): + print("Hello, world!") + + def build(bld): + bld.add_pre_fun(mycallback) + """ + try: + self.pre_funs.append(meth) + except AttributeError: + self.pre_funs = [meth] + + def add_post_fun(self, meth): + """ + Binds a callback method to execute immediately after the build is successful:: + + def call_ldconfig(bld): + bld.exec_command('/sbin/ldconfig') + + def build(bld): + if bld.cmd == 'install': + bld.add_post_fun(call_ldconfig) + """ + try: + self.post_funs.append(meth) + except AttributeError: + self.post_funs = [meth] + + def get_group(self, x): + """ + Returns the build group named `x`, or the current group if `x` is None + + :param x: name or number or None + :type x: string, int or None + """ + if not self.groups: + self.add_group() + if x 
is None: + return self.groups[self.current_group] + if x in self.group_names: + return self.group_names[x] + return self.groups[x] + + def add_to_group(self, tgen, group=None): + """Adds a task or a task generator to the build; there is no attempt to remove it if it was already added.""" + assert(isinstance(tgen, TaskGen.task_gen) or isinstance(tgen, Task.Task)) + tgen.bld = self + self.get_group(group).append(tgen) + + def get_group_name(self, g): + """ + Returns the name of the input build group + + :param g: build group object or build group index + :type g: integer or list + :return: name + :rtype: string + """ + if not isinstance(g, list): + g = self.groups[g] + for x in self.group_names: + if id(self.group_names[x]) == id(g): + return x + return '' + + def get_group_idx(self, tg): + """ + Returns the index of the group containing the task generator given as argument:: + + def build(bld): + tg = bld(name='nada') + 0 == bld.get_group_idx(tg) + + :param tg: Task generator object + :type tg: :py:class:`waflib.TaskGen.task_gen` + :rtype: int + """ + se = id(tg) + for i, tmp in enumerate(self.groups): + for t in tmp: + if id(t) == se: + return i + return None + + def add_group(self, name=None, move=True): + """ + Adds a new group of tasks/task generators. By default the new group becomes + the default group for new task generators (make sure to create build groups in order). + + :param name: name for this group + :type name: string + :param move: set this new group as default group (True by default) + :type move: bool + :raises: :py:class:`waflib.Errors.WafError` if a group by the name given already exists + """ + if name and name in self.group_names: + raise Errors.WafError('add_group: name %s already present', name) + g = [] + self.group_names[name] = g + self.groups.append(g) + if move: + self.current_group = len(self.groups) - 1 + + def set_group(self, idx): + """ + Sets the build group at position idx as current so that newly added + task generators are added to this one by default:: + + def build(bld): + bld(rule='touch ${TGT}', target='foo.txt') + bld.add_group() # now the current group is 1 + bld(rule='touch ${TGT}', target='bar.txt') + bld.set_group(0) # now the current group is 0 + bld(rule='touch ${TGT}', target='truc.txt') # build truc.txt before bar.txt + + :param idx: group name or group index + :type idx: string or int + """ + if isinstance(idx, str): + g = self.group_names[idx] + for i, tmp in enumerate(self.groups): + if id(g) == id(tmp): + self.current_group = i + break + else: + self.current_group = idx + + def total(self): + """ + Approximate task count: this value may be inaccurate if task generators + are posted lazily (see :py:attr:`waflib.Build.BuildContext.post_mode`). + The value :py:attr:`waflib.Runner.Parallel.total` is updated during the task execution. + + :rtype: int + """ + total = 0 + for group in self.groups: + for tg in group: + try: + total += len(tg.tasks) + except AttributeError: + total += 1 + return total + + def get_targets(self): + """ + This method returns a pair containing the index of the last build group to post, + and the list of task generator objects corresponding to the target names. 
+ + This is used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` + to perform partial builds:: + + $ waf --targets=myprogram,myshlib + + :return: the minimum build group index, and list of task generators + :rtype: tuple + """ + to_post = [] + min_grp = 0 + for name in self.targets.split(','): + tg = self.get_tgen_by_name(name) + m = self.get_group_idx(tg) + if m > min_grp: + min_grp = m + to_post = [tg] + elif m == min_grp: + to_post.append(tg) + return (min_grp, to_post) + + def get_all_task_gen(self): + """ + Returns a list of all task generators for troubleshooting purposes. + """ + lst = [] + for g in self.groups: + lst.extend(g) + return lst + + def post_group(self): + """ + Post task generators from the group indexed by self.current_group; used internally + by :py:meth:`waflib.Build.BuildContext.get_build_iterator` + """ + def tgpost(tg): + try: + f = tg.post + except AttributeError: + pass + else: + f() + + if self.targets == '*': + for tg in self.groups[self.current_group]: + tgpost(tg) + elif self.targets: + if self.current_group < self._min_grp: + for tg in self.groups[self.current_group]: + tgpost(tg) + else: + for tg in self._exact_tg: + tg.post() + else: + ln = self.launch_node() + if ln.is_child_of(self.bldnode): + if Logs.verbose > 1: + Logs.warn('Building from the build directory, forcing --targets=*') + ln = self.srcnode + elif not ln.is_child_of(self.srcnode): + if Logs.verbose > 1: + Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)', ln.abspath(), self.srcnode.abspath()) + ln = self.srcnode + + def is_post(tg, ln): + try: + p = tg.path + except AttributeError: + pass + else: + if p.is_child_of(ln): + return True + + def is_post_group(): + for i, g in enumerate(self.groups): + if i > self.current_group: + for tg in g: + if is_post(tg, ln): + return True + + if self.post_mode == POST_LAZY and ln != self.srcnode: + # partial folder builds require all targets from a previous build group + if is_post_group(): + ln = self.srcnode + + for tg in self.groups[self.current_group]: + if is_post(tg, ln): + tgpost(tg) + + def get_tasks_group(self, idx): + """ + Returns all task instances for the build group at position idx, + used internally by :py:meth:`waflib.Build.BuildContext.get_build_iterator` + + :rtype: list of :py:class:`waflib.Task.Task` + """ + tasks = [] + for tg in self.groups[idx]: + try: + tasks.extend(tg.tasks) + except AttributeError: # not a task generator + tasks.append(tg) + return tasks + + def get_build_iterator(self): + """ + Creates a Python generator object that returns lists of tasks that may be processed in parallel. 
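A hedged sketch of a consumer (the real one is :py:class:`waflib.Runner.Parallel`,
which also tracks per-task state)::

    for tasks in bld.get_build_iterator():
        if not tasks:
            break        # the generator yields [] forever once all groups are done
        for tsk in tasks:
            tsk.run()    # grossly simplified
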
+ + :return: tasks which can be executed immediately + :rtype: generator returning lists of :py:class:`waflib.Task.Task` + """ + if self.targets and self.targets != '*': + (self._min_grp, self._exact_tg) = self.get_targets() + + if self.post_mode != POST_LAZY: + for self.current_group, _ in enumerate(self.groups): + self.post_group() + + for self.current_group, _ in enumerate(self.groups): + # first post the task generators for the group + if self.post_mode != POST_AT_ONCE: + self.post_group() + + # then extract the tasks + tasks = self.get_tasks_group(self.current_group) + + # if the constraints are set properly (ext_in/ext_out, before/after) + # the call to set_file_constraints may be removed (can be a 15% penalty on no-op rebuilds) + # (but leave set_file_constraints for the installation step) + # + # if the tasks have only files, set_file_constraints is required but set_precedence_constraints is not necessary + # + Task.set_file_constraints(tasks) + Task.set_precedence_constraints(tasks) + + self.cur_tasks = tasks + if tasks: + yield tasks + + while 1: + # the build stops once there are no tasks to process + yield [] + + def install_files(self, dest, files, **kw): + """ + Creates a task generator to install files on the system:: + + def build(bld): + bld.install_files('${DATADIR}', self.path.find_resource('wscript')) + + :param dest: path representing the destination directory + :type dest: :py:class:`waflib.Node.Node` or string (absolute path) + :param files: input files + :type files: list of strings or list of :py:class:`waflib.Node.Node` + :param env: configuration set to expand *dest* + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + :param relative_trick: preserve the folder hierarchy when installing whole folders + :type relative_trick: bool + :param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node` + :type cwd: :py:class:`waflib.Node.Node` + :param postpone: execute the task immediately to perform the installation (False by default) + :type postpone: bool + """ + assert(dest) + tg = self(features='install_task', install_to=dest, install_from=files, **kw) + tg.dest = tg.install_to + tg.type = 'install_files' + if not kw.get('postpone', True): + tg.post() + return tg + + def install_as(self, dest, srcfile, **kw): + """ + Creates a task generator to install a file on the system with a different name:: + + def build(bld): + bld.install_as('${PREFIX}/bin', 'myapp', chmod=Utils.O755) + + :param dest: destination file + :type dest: :py:class:`waflib.Node.Node` or string (absolute path) + :param srcfile: input file + :type srcfile: string or :py:class:`waflib.Node.Node` + :param cwd: parent node for searching srcfile, when srcfile is not an instance of :py:class:`waflib.Node.Node` + :type cwd: :py:class:`waflib.Node.Node` + :param env: configuration set for performing substitutions in dest + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + :param postpone: execute the task immediately to perform the installation (False by default) + :type postpone: bool + """ + assert(dest) + tg = self(features='install_task', install_to=dest, install_from=srcfile, **kw) + tg.dest = tg.install_to + tg.type = 'install_as' + if not kw.get('postpone', True): + tg.post() + return tg + + def symlink_as(self, dest, src, **kw): + """ + Creates a task generator to install a symlink:: + + def build(bld): + bld.symlink_as('${PREFIX}/lib/libfoo.so', 'libfoo.so.1.2.3') + + :param dest: absolute path of the symlink + :type dest: 
:py:class:`waflib.Node.Node` or string (absolute path) + :param src: link contents, which is a relative or absolute path which may exist or not + :type src: string + :param env: configuration set for performing substitutions in dest + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + :param add: add the task created to a build group - set ``False`` only if the installation task is created after the build has started + :type add: bool + :param postpone: execute the task immediately to perform the installation + :type postpone: bool + :param relative_trick: make the symlink relative (default: ``False``) + :type relative_trick: bool + """ + assert(dest) + tg = self(features='install_task', install_to=dest, install_from=src, **kw) + tg.dest = tg.install_to + tg.type = 'symlink_as' + tg.link = src + # TODO if add: self.add_to_group(tsk) + if not kw.get('postpone', True): + tg.post() + return tg + +@TaskGen.feature('install_task') +@TaskGen.before_method('process_rule', 'process_source') +def process_install_task(self): + """Creates the installation task for the current task generator; uses :py:func:`waflib.Build.add_install_task` internally.""" + self.add_install_task(**self.__dict__) + +@TaskGen.taskgen_method +def add_install_task(self, **kw): + """ + Creates the installation task for the current task generator, and executes it immediately if necessary + + :returns: An installation task + :rtype: :py:class:`waflib.Build.inst` + """ + if not self.bld.is_install: + return + if not kw['install_to']: + return + + if kw['type'] == 'symlink_as' and Utils.is_win32: + if kw.get('win32_install'): + kw['type'] = 'install_as' + else: + # just exit + return + + tsk = self.install_task = self.create_task('inst') + tsk.chmod = kw.get('chmod', Utils.O644) + tsk.link = kw.get('link', '') or kw.get('install_from', '') + tsk.relative_trick = kw.get('relative_trick', False) + tsk.type = kw['type'] + tsk.install_to = tsk.dest = kw['install_to'] + tsk.install_from = kw['install_from'] + tsk.relative_base = kw.get('cwd') or kw.get('relative_base', self.path) + tsk.install_user = kw.get('install_user') + tsk.install_group = kw.get('install_group') + tsk.init_files() + if not kw.get('postpone', True): + tsk.run_now() + return tsk + +@TaskGen.taskgen_method +def add_install_files(self, **kw): + """ + Creates an installation task for files + + :returns: An installation task + :rtype: :py:class:`waflib.Build.inst` + """ + kw['type'] = 'install_files' + return self.add_install_task(**kw) + +@TaskGen.taskgen_method +def add_install_as(self, **kw): + """ + Creates an installation task for a single file + + :returns: An installation task + :rtype: :py:class:`waflib.Build.inst` + """ + kw['type'] = 'install_as' + return self.add_install_task(**kw) + +@TaskGen.taskgen_method +def add_symlink_as(self, **kw): + """ + Creates an installation task for a symbolic link + + :returns: An installation task + :rtype: :py:class:`waflib.Build.inst` + """ + kw['type'] = 'symlink_as' + return self.add_install_task(**kw) + +class inst(Task.Task): + """Task that installs files or symlinks; it is typically executed by :py:class:`waflib.Build.InstallContext` and :py:class:`waflib.Build.UnInstallContext`""" + def __str__(self): + """Returns an empty string to disable the standard task display""" + return '' + + def uid(self): + """Returns a unique identifier for the task""" + lst = self.inputs + self.outputs + [self.link, self.generator.path.abspath()] + return Utils.h_list(lst) + + def init_files(self): + """ + Initializes the task input 
and output nodes + """ + if self.type == 'symlink_as': + inputs = [] + else: + inputs = self.generator.to_nodes(self.install_from) + if self.type == 'install_as': + assert len(inputs) == 1 + self.set_inputs(inputs) + + dest = self.get_install_path() + outputs = [] + if self.type == 'symlink_as': + if self.relative_trick: + self.link = os.path.relpath(self.link, os.path.dirname(dest)) + outputs.append(self.generator.bld.root.make_node(dest)) + elif self.type == 'install_as': + outputs.append(self.generator.bld.root.make_node(dest)) + else: + for y in inputs: + if self.relative_trick: + destfile = os.path.join(dest, y.path_from(self.relative_base)) + else: + destfile = os.path.join(dest, y.name) + outputs.append(self.generator.bld.root.make_node(destfile)) + self.set_outputs(outputs) + + def runnable_status(self): + """ + Installation tasks are always executed, so this method returns either :py:const:`waflib.Task.ASK_LATER` or :py:const:`waflib.Task.RUN_ME`. + """ + ret = super(inst, self).runnable_status() + if ret == Task.SKIP_ME and self.generator.bld.is_install: + return Task.RUN_ME + return ret + + def post_run(self): + """ + Disables any post-run operations + """ + pass + + def get_install_path(self, destdir=True): + """ + Returns the destination path where files will be installed, pre-pending `destdir`. + + Relative paths will be interpreted relative to `PREFIX` if no `destdir` is given. + + :rtype: string + """ + if isinstance(self.install_to, Node.Node): + dest = self.install_to.abspath() + else: + dest = os.path.normpath(Utils.subst_vars(self.install_to, self.env)) + if not os.path.isabs(dest): + dest = os.path.join(self.env.PREFIX, dest) + if destdir and Options.options.destdir: + dest = Options.options.destdir.rstrip(os.sep) + os.sep + os.path.splitdrive(dest)[1].lstrip(os.sep) + return dest + + def copy_fun(self, src, tgt): + """ + Copies a file from src to tgt, preserving permissions and trying to work + around path limitations on Windows platforms. On Unix-like platforms, + the owner/group of the target file may be set through install_user/install_group + + :param src: absolute path + :type src: string + :param tgt: absolute path + :type tgt: string + """ + # override this if you want to strip executables + # kw['tsk'].source is the task that created the files in the build + if Utils.is_win32 and len(tgt) > 259 and not tgt.startswith('\\\\?\\'): + tgt = '\\\\?\\' + tgt + shutil.copy2(src, tgt) + self.fix_perms(tgt) + + def rm_empty_dirs(self, tgt): + """ + Removes empty folders recursively when uninstalling. + + :param tgt: absolute path + :type tgt: string + """ + while tgt: + tgt = os.path.dirname(tgt) + try: + os.rmdir(tgt) + except OSError: + break + + def run(self): + """ + Performs file or symlink installation + """ + is_install = self.generator.bld.is_install + if not is_install: # unnecessary? 
+ return + + for x in self.outputs: + if is_install == INSTALL: + x.parent.mkdir() + if self.type == 'symlink_as': + fun = is_install == INSTALL and self.do_link or self.do_unlink + fun(self.link, self.outputs[0].abspath()) + else: + fun = is_install == INSTALL and self.do_install or self.do_uninstall + launch_node = self.generator.bld.launch_node() + for x, y in zip(self.inputs, self.outputs): + fun(x.abspath(), y.abspath(), x.path_from(launch_node)) + + def run_now(self): + """ + Try executing the installation task right now + + :raises: :py:class:`waflib.Errors.TaskNotReady` + """ + status = self.runnable_status() + if status not in (Task.RUN_ME, Task.SKIP_ME): + raise Errors.TaskNotReady('Could not process %r: status %r' % (self, status)) + self.run() + self.hasrun = Task.SUCCESS + + def do_install(self, src, tgt, lbl, **kw): + """ + Copies a file from src to tgt with given file permissions. The actual copy is only performed + if the source and target file sizes or timestamps differ. When the copy occurs, + the file is always first removed and then copied so as to prevent stale inodes. + + :param src: file name as absolute path + :type src: string + :param tgt: file destination, as absolute path + :type tgt: string + :param lbl: file source description + :type lbl: string + :param chmod: installation mode + :type chmod: int + :raises: :py:class:`waflib.Errors.WafError` if the file cannot be written + """ + if not Options.options.force: + # check if the file is already there to avoid a copy + try: + st1 = os.stat(tgt) + st2 = os.stat(src) + except OSError: + pass + else: + # same size and identical timestamps -> make no copy + if st1.st_mtime + 2 >= st2.st_mtime and st1.st_size == st2.st_size: + if not self.generator.bld.progress_bar: + + c1 = Logs.colors.NORMAL + c2 = Logs.colors.BLUE + + Logs.info('%s- install %s%s%s (from %s)', c1, c2, tgt, c1, lbl) + return False + + if not self.generator.bld.progress_bar: + + c1 = Logs.colors.NORMAL + c2 = Logs.colors.BLUE + + Logs.info('%s+ install %s%s%s (from %s)', c1, c2, tgt, c1, lbl) + + # Give best attempt at making destination overwritable, + # like the 'install' utility used by 'make install' does. 
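# A condensed map of the overwrite sequence that follows (descriptive only):
#   1. chmod(tgt, Utils.O644 | existing mode) - best effort, failures ignored
#   2. os.remove(tgt)                         - drop the old inode, so processes
#                                               mapping a shared lib keep working
#   3. copy_fun(src, tgt)                     - shutil.copy2 plus fix_perms()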
+ try: + os.chmod(tgt, Utils.O644 | stat.S_IMODE(os.stat(tgt).st_mode)) + except EnvironmentError: + pass + + # following is for shared libs and stale inodes (-_-) + try: + os.remove(tgt) + except OSError: + pass + + try: + self.copy_fun(src, tgt) + except EnvironmentError as e: + if not os.path.exists(src): + Logs.error('File %r does not exist', src) + elif not os.path.isfile(src): + Logs.error('Input %r is not a file', src) + raise Errors.WafError('Could not install the file %r' % tgt, e) + + def fix_perms(self, tgt): + """ + Change the ownership of the file/folder/link pointed by the given path + This looks up for `install_user` or `install_group` attributes + on the task or on the task generator:: + + def build(bld): + bld.install_as('${PREFIX}/wscript', + 'wscript', + install_user='nobody', install_group='nogroup') + bld.symlink_as('${PREFIX}/wscript_link', + Utils.subst_vars('${PREFIX}/wscript', bld.env), + install_user='nobody', install_group='nogroup') + """ + if not Utils.is_win32: + user = getattr(self, 'install_user', None) or getattr(self.generator, 'install_user', None) + group = getattr(self, 'install_group', None) or getattr(self.generator, 'install_group', None) + if user or group: + Utils.lchown(tgt, user or -1, group or -1) + if not os.path.islink(tgt): + os.chmod(tgt, self.chmod) + + def do_link(self, src, tgt, **kw): + """ + Creates a symlink from tgt to src. + + :param src: file name as absolute path + :type src: string + :param tgt: file destination, as absolute path + :type tgt: string + """ + if os.path.islink(tgt) and os.readlink(tgt) == src: + if not self.generator.bld.progress_bar: + c1 = Logs.colors.NORMAL + c2 = Logs.colors.BLUE + Logs.info('%s- symlink %s%s%s (to %s)', c1, c2, tgt, c1, src) + else: + try: + os.remove(tgt) + except OSError: + pass + if not self.generator.bld.progress_bar: + c1 = Logs.colors.NORMAL + c2 = Logs.colors.BLUE + Logs.info('%s+ symlink %s%s%s (to %s)', c1, c2, tgt, c1, src) + os.symlink(src, tgt) + self.fix_perms(tgt) + + def do_uninstall(self, src, tgt, lbl, **kw): + """ + See :py:meth:`waflib.Build.inst.do_install` + """ + if not self.generator.bld.progress_bar: + c1 = Logs.colors.NORMAL + c2 = Logs.colors.BLUE + Logs.info('%s- remove %s%s%s', c1, c2, tgt, c1) + + #self.uninstall.append(tgt) + try: + os.remove(tgt) + except OSError as e: + if e.errno != errno.ENOENT: + if not getattr(self, 'uninstall_error', None): + self.uninstall_error = True + Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)') + if Logs.verbose > 1: + Logs.warn('Could not remove %s (error code %r)', e.filename, e.errno) + self.rm_empty_dirs(tgt) + + def do_unlink(self, src, tgt, **kw): + """ + See :py:meth:`waflib.Build.inst.do_link` + """ + try: + if not self.generator.bld.progress_bar: + c1 = Logs.colors.NORMAL + c2 = Logs.colors.BLUE + Logs.info('%s- remove %s%s%s', c1, c2, tgt, c1) + os.remove(tgt) + except OSError: + pass + self.rm_empty_dirs(tgt) + +class InstallContext(BuildContext): + '''installs the targets on the system''' + cmd = 'install' + + def __init__(self, **kw): + super(InstallContext, self).__init__(**kw) + self.is_install = INSTALL + +class UninstallContext(InstallContext): + '''removes the targets installed''' + cmd = 'uninstall' + + def __init__(self, **kw): + super(UninstallContext, self).__init__(**kw) + self.is_install = UNINSTALL + +class CleanContext(BuildContext): + '''cleans the project''' + cmd = 'clean' + def execute(self): + """ + See :py:func:`waflib.Build.BuildContext.execute`. 
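Typically driven as::

    $ waf clean
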
+ """ + self.restore() + if not self.all_envs: + self.load_envs() + + self.recurse([self.run_dir]) + try: + self.clean() + finally: + self.store() + + def clean(self): + """ + Remove most files from the build directory, and reset all caches. + + Custom lists of files to clean can be declared as `bld.clean_files`. + For example, exclude `build/program/myprogram` from getting removed:: + + def build(bld): + bld.clean_files = bld.bldnode.ant_glob('**', + excl='.lock* config.log c4che/* config.h program/myprogram', + quiet=True, generator=True) + """ + Logs.debug('build: clean called') + + if hasattr(self, 'clean_files'): + for n in self.clean_files: + n.delete() + elif self.bldnode != self.srcnode: + # would lead to a disaster if top == out + lst = [] + for env in self.all_envs.values(): + lst.extend(self.root.find_or_declare(f) for f in env[CFG_FILES]) + excluded_dirs = '.lock* *conf_check_*/** config.log %s/*' % CACHE_DIR + for n in self.bldnode.ant_glob('**/*', excl=excluded_dirs, quiet=True): + if n in lst: + continue + n.delete() + self.root.children = {} + + for v in SAVED_ATTRS: + if v == 'root': + continue + setattr(self, v, {}) + +class ListContext(BuildContext): + '''lists the targets to execute''' + cmd = 'list' + + def execute(self): + """ + In addition to printing the name of each build target, + a description column will include text for each task + generator which has a "description" field set. + + See :py:func:`waflib.Build.BuildContext.execute`. + """ + self.restore() + if not self.all_envs: + self.load_envs() + + self.recurse([self.run_dir]) + self.pre_build() + + # display the time elapsed in the progress bar + self.timer = Utils.Timer() + + for g in self.groups: + for tg in g: + try: + f = tg.post + except AttributeError: + pass + else: + f() + + try: + # force the cache initialization + self.get_tgen_by_name('') + except Errors.WafError: + pass + + targets = sorted(self.task_gen_cache_names) + + # figure out how much to left-justify, for largest target name + line_just = max(len(t) for t in targets) if targets else 0 + + for target in targets: + tgen = self.task_gen_cache_names[target] + + # Support displaying the description for the target + # if it was set on the tgen + descript = getattr(tgen, 'description', '') + if descript: + target = target.ljust(line_just) + descript = ': %s' % descript + + Logs.pprint('GREEN', target, label=descript) + +class StepContext(BuildContext): + '''executes tasks in a step-by-step fashion, for debugging''' + cmd = 'step' + + def __init__(self, **kw): + super(StepContext, self).__init__(**kw) + self.files = Options.options.files + + def compile(self): + """ + Overrides :py:meth:`waflib.Build.BuildContext.compile` to perform a partial build + on tasks matching the input/output pattern given (regular expression matching):: + + $ waf step --files=foo.c,bar.c,in:truc.c,out:bar.o + $ waf step --files=in:foo.cpp.1.o # link task only + + """ + if not self.files: + Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"') + BuildContext.compile(self) + return + + targets = [] + if self.targets and self.targets != '*': + targets = self.targets.split(',') + + for g in self.groups: + for tg in g: + if targets and tg.name not in targets: + continue + + try: + f = tg.post + except AttributeError: + pass + else: + f() + + for pat in self.files.split(','): + matcher = self.get_matcher(pat) + for tg in g: + if isinstance(tg, Task.Task): + lst = [tg] + else: + lst = tg.tasks + for tsk in lst: + do_exec = False + for node in 
tsk.inputs: + if matcher(node, output=False): + do_exec = True + break + for node in tsk.outputs: + if matcher(node, output=True): + do_exec = True + break + if do_exec: + ret = tsk.run() + Logs.info('%s -> exit %r', tsk, ret) + + def get_matcher(self, pat): + """ + Converts a step pattern into a function + + :param: pat: pattern of the form in:truc.c,out:bar.o + :returns: Python function that uses Node objects as inputs and returns matches + :rtype: function + """ + # this returns a function + inn = True + out = True + if pat.startswith('in:'): + out = False + pat = pat.replace('in:', '') + elif pat.startswith('out:'): + inn = False + pat = pat.replace('out:', '') + + anode = self.root.find_node(pat) + pattern = None + if not anode: + if not pat.startswith('^'): + pat = '^.+?%s' % pat + if not pat.endswith('$'): + pat = '%s$' % pat + pattern = re.compile(pat) + + def match(node, output): + if output and not out: + return False + if not output and not inn: + return False + + if anode: + return anode == node + else: + return pattern.match(node.abspath()) + return match + +class EnvContext(BuildContext): + """Subclass EnvContext to create commands that require configuration data in 'env'""" + fun = cmd = None + def execute(self): + """ + See :py:func:`waflib.Build.BuildContext.execute`. + """ + self.restore() + if not self.all_envs: + self.load_envs() + self.recurse([self.run_dir]) + diff --git a/third_party/waf/waflib/ConfigSet.py b/third_party/waf/waflib/ConfigSet.py new file mode 100644 index 0000000..901fba6 --- /dev/null +++ b/third_party/waf/waflib/ConfigSet.py @@ -0,0 +1,361 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" + +ConfigSet: a special dict + +The values put in :py:class:`ConfigSet` must be serializable (dicts, lists, strings) +""" + +import copy, re, os +from waflib import Logs, Utils +re_imp = re.compile(r'^(#)*?([^#=]*?)\ =\ (.*?)$', re.M) + +class ConfigSet(object): + """ + A copy-on-write dict with human-readable serialized format. The serialization format + is human-readable (python-like) and performed by using eval() and repr(). + For high performance prefer pickle. Do not store functions as they are not serializable. 
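A store/load round trip, as a short sketch (the path is hypothetical)::

    env = ConfigSet()
    env.CFLAGS = ['-O2']
    env.store('/tmp/example_cache.py')    # serialized as NAME = repr(value) lines
    env2 = ConfigSet('/tmp/example_cache.py')
    assert env2.CFLAGS == ['-O2']
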
+ + The values can be accessed by attributes or by keys:: + + from waflib.ConfigSet import ConfigSet + env = ConfigSet() + env.FOO = 'test' + env['FOO'] = 'test' + """ + __slots__ = ('table', 'parent') + def __init__(self, filename=None): + self.table = {} + """ + Internal dict holding the object values + """ + #self.parent = None + + if filename: + self.load(filename) + + def __contains__(self, key): + """ + Enables the *in* syntax:: + + if 'foo' in env: + print(env['foo']) + """ + if key in self.table: + return True + try: + return self.parent.__contains__(key) + except AttributeError: + return False # parent may not exist + + def keys(self): + """Dict interface""" + keys = set() + cur = self + while cur: + keys.update(cur.table.keys()) + cur = getattr(cur, 'parent', None) + keys = list(keys) + keys.sort() + return keys + + def __iter__(self): + return iter(self.keys()) + + def __str__(self): + """Text representation of the ConfigSet (for debugging purposes)""" + return "\n".join(["%r %r" % (x, self.__getitem__(x)) for x in self.keys()]) + + def __getitem__(self, key): + """ + Dictionary interface: get value from key:: + + def configure(conf): + conf.env['foo'] = {} + print(env['foo']) + """ + try: + while 1: + x = self.table.get(key) + if not x is None: + return x + self = self.parent + except AttributeError: + return [] + + def __setitem__(self, key, value): + """ + Dictionary interface: set value from key + """ + self.table[key] = value + + def __delitem__(self, key): + """ + Dictionary interface: mark the value as missing + """ + self[key] = [] + + def __getattr__(self, name): + """ + Attribute access provided for convenience. The following forms are equivalent:: + + def configure(conf): + conf.env.value + conf.env['value'] + """ + if name in self.__slots__: + return object.__getattribute__(self, name) + else: + return self[name] + + def __setattr__(self, name, value): + """ + Attribute access provided for convenience. The following forms are equivalent:: + + def configure(conf): + conf.env.value = x + env['value'] = x + """ + if name in self.__slots__: + object.__setattr__(self, name, value) + else: + self[name] = value + + def __delattr__(self, name): + """ + Attribute access provided for convenience. The following forms are equivalent:: + + def configure(conf): + del env.value + del env['value'] + """ + if name in self.__slots__: + object.__delattr__(self, name) + else: + del self[name] + + def derive(self): + """ + Returns a new ConfigSet deriving from self. The copy returned + will be a shallow copy:: + + from waflib.ConfigSet import ConfigSet + env = ConfigSet() + env.append_value('CFLAGS', ['-O2']) + child = env.derive() + child.CFLAGS.append('test') # warning! this will modify 'env' + child.CFLAGS = ['-O3'] # new list, ok + child.append_value('CFLAGS', ['-O3']) # ok + + Use :py:func:`ConfigSet.detach` to detach the child from the parent. + """ + newenv = ConfigSet() + newenv.parent = self + return newenv + + def detach(self): + """ + Detaches this instance from its parent (if present) + + Modifying the parent :py:class:`ConfigSet` will not change the current object + Modifying this :py:class:`ConfigSet` will not modify the parent one. + """ + tbl = self.get_merged_dict() + try: + delattr(self, 'parent') + except AttributeError: + pass + else: + keys = tbl.keys() + for x in keys: + tbl[x] = copy.deepcopy(tbl[x]) + self.table = tbl + return self + + def get_flat(self, key): + """ + Returns a value as a string. If the input is a list, the value returned is space-separated. 
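For example::

    env.CFLAGS = ['-O2', '-g']
    env.get_flat('CFLAGS')   # -> '-O2 -g'
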
+
+		:param key: key to use
+		:type key: string
+		"""
+		s = self[key]
+		if isinstance(s, str):
+			return s
+		return ' '.join(s)
+
+	def _get_list_value_for_modification(self, key):
+		"""
+		Returns a list value for further modification.
+
+		The list may be modified in place; there is no need to do the following afterwards::
+
+			self.table[var] = value
+		"""
+		try:
+			value = self.table[key]
+		except KeyError:
+			try:
+				value = self.parent[key]
+			except AttributeError:
+				value = []
+			else:
+				if isinstance(value, list):
+					# force a copy
+					value = value[:]
+				else:
+					value = [value]
+			self.table[key] = value
+		else:
+			if not isinstance(value, list):
+				self.table[key] = value = [value]
+		return value
+
+	def append_value(self, var, val):
+		"""
+		Appends a value to the specified config key::
+
+			def build(bld):
+				bld.env.append_value('CFLAGS', ['-O2'])
+
+		The value must be a list or a tuple
+		"""
+		if isinstance(val, str): # if everything were strings, this could be optimized
+			val = [val]
+		current_value = self._get_list_value_for_modification(var)
+		current_value.extend(val)
+
+	def prepend_value(self, var, val):
+		"""
+		Prepends a value to the specified item::
+
+			def configure(conf):
+				conf.env.prepend_value('CFLAGS', ['-O2'])
+
+		The value must be a list or a tuple
+		"""
+		if isinstance(val, str):
+			val = [val]
+		self.table[var] = val + self._get_list_value_for_modification(var)
+
+	def append_unique(self, var, val):
+		"""
+		Appends a value to the specified item only if it's not already present::
+
+			def build(bld):
+				bld.env.append_unique('CFLAGS', ['-O2', '-g'])
+
+		The value must be a list or a tuple
+		"""
+		if isinstance(val, str):
+			val = [val]
+		current_value = self._get_list_value_for_modification(var)
+
+		for x in val:
+			if x not in current_value:
+				current_value.append(x)
+
+	def get_merged_dict(self):
+		"""
+		Computes a dictionary by merging self with all of its parents
+
+		:rtype: dict
+		"""
+		table_list = []
+		env = self
+		while 1:
+			table_list.insert(0, env.table)
+			try:
+				env = env.parent
+			except AttributeError:
+				break
+		merged_table = {}
+		for table in table_list:
+			merged_table.update(table)
+		return merged_table
+
+	def store(self, filename):
+		"""
+		Serializes the :py:class:`ConfigSet` data to a file. See :py:meth:`ConfigSet.load` for reading such files.
+
+		:param filename: file to use
+		:type filename: string
+		"""
+		try:
+			os.makedirs(os.path.split(filename)[0])
+		except OSError:
+			pass
+
+		buf = []
+		merged_table = self.get_merged_dict()
+		keys = list(merged_table.keys())
+		keys.sort()
+
+		try:
+			fun = ascii
+		except NameError:
+			fun = repr
+
+		for k in keys:
+			if k != 'undo_stack':
+				buf.append('%s = %s\n' % (k, fun(merged_table[k])))
+		Utils.writef(filename, ''.join(buf))
+
+	def load(self, filename):
+		"""
+		Restores contents from a file (current values are not cleared). Files are written using :py:meth:`ConfigSet.store`.
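+
+		A hedged sketch of the merge behaviour (the file name is hypothetical)::
+
+			env = ConfigSet()
+			env.FOO = 'kept'
+			env.load('extra_cache.py')  # FOO survives unless the file also defines FOO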
+ + :param filename: file to use + :type filename: string + """ + tbl = self.table + code = Utils.readf(filename, m='r') + for m in re_imp.finditer(code): + g = m.group + tbl[g(2)] = eval(g(3)) + Logs.debug('env: %s', self.table) + + def update(self, d): + """ + Dictionary interface: replace values with the ones from another dict + + :param d: object to use the value from + :type d: dict-like object + """ + self.table.update(d) + + def stash(self): + """ + Stores the object state to provide transactionality semantics:: + + env = ConfigSet() + env.stash() + try: + env.append_value('CFLAGS', '-O3') + call_some_method(env) + finally: + env.revert() + + The history is kept in a stack, and is lost during the serialization by :py:meth:`ConfigSet.store` + """ + orig = self.table + tbl = self.table = self.table.copy() + for x in tbl.keys(): + tbl[x] = copy.deepcopy(tbl[x]) + self.undo_stack = self.undo_stack + [orig] + + def commit(self): + """ + Commits transactional changes. See :py:meth:`ConfigSet.stash` + """ + self.undo_stack.pop(-1) + + def revert(self): + """ + Reverts the object to a previous state. See :py:meth:`ConfigSet.stash` + """ + self.table = self.undo_stack.pop(-1) + diff --git a/third_party/waf/waflib/Configure.py b/third_party/waf/waflib/Configure.py new file mode 100644 index 0000000..f6fdc4e --- /dev/null +++ b/third_party/waf/waflib/Configure.py @@ -0,0 +1,656 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Configuration system + +A :py:class:`waflib.Configure.ConfigurationContext` instance is created when ``waf configure`` is called, it is used to: + +* create data dictionaries (ConfigSet instances) +* store the list of modules to import +* hold configuration routines such as ``find_program``, etc +""" + +import os, re, shlex, shutil, sys, time, traceback +from waflib import ConfigSet, Utils, Options, Logs, Context, Build, Errors + +WAF_CONFIG_LOG = 'config.log' +"""Name of the configuration log file""" + +autoconfig = False +"""Execute the configuration automatically""" + +conf_template = '''# project %(app)s configured on %(now)s by +# waf %(wafver)s (abi %(abi)s, python %(pyver)x on %(systype)s) +# using %(args)s +#''' + +class ConfigurationContext(Context.Context): + '''configures the project''' + + cmd = 'configure' + + error_handlers = [] + """ + Additional functions to handle configuration errors + """ + + def __init__(self, **kw): + super(ConfigurationContext, self).__init__(**kw) + self.environ = dict(os.environ) + self.all_envs = {} + + self.top_dir = None + self.out_dir = None + + self.tools = [] # tools loaded in the configuration, and that will be loaded when building + + self.hash = 0 + self.files = [] + + self.tool_cache = [] + + self.setenv('') + + def setenv(self, name, env=None): + """ + Set a new config set for conf.env. If a config set of that name already exists, + recall it without modification. + + The name is the filename prefix to save to ``c4che/NAME_cache.py``, and it + is also used as *variants* by the build commands. 
+ Though related to variants, whatever kind of data may be stored in the config set:: + + def configure(cfg): + cfg.env.ONE = 1 + cfg.setenv('foo') + cfg.env.ONE = 2 + + def build(bld): + 2 == bld.env_of_name('foo').ONE + + :param name: name of the configuration set + :type name: string + :param env: ConfigSet to copy, or an empty ConfigSet is created + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + """ + if name not in self.all_envs or env: + if not env: + env = ConfigSet.ConfigSet() + self.prepare_env(env) + else: + env = env.derive() + self.all_envs[name] = env + self.variant = name + + def get_env(self): + """Getter for the env property""" + return self.all_envs[self.variant] + def set_env(self, val): + """Setter for the env property""" + self.all_envs[self.variant] = val + + env = property(get_env, set_env) + + def init_dirs(self): + """ + Initialize the project directory and the build directory + """ + + top = self.top_dir + if not top: + top = Options.options.top + if not top: + top = getattr(Context.g_module, Context.TOP, None) + if not top: + top = self.path.abspath() + top = os.path.abspath(top) + + self.srcnode = (os.path.isabs(top) and self.root or self.path).find_dir(top) + assert(self.srcnode) + + out = self.out_dir + if not out: + out = Options.options.out + if not out: + out = getattr(Context.g_module, Context.OUT, None) + if not out: + out = Options.lockfile.replace('.lock-waf_%s_' % sys.platform, '').replace('.lock-waf', '') + + # someone can be messing with symlinks + out = os.path.realpath(out) + + self.bldnode = (os.path.isabs(out) and self.root or self.path).make_node(out) + self.bldnode.mkdir() + + if not os.path.isdir(self.bldnode.abspath()): + self.fatal('Could not create the build directory %s' % self.bldnode.abspath()) + + def execute(self): + """ + See :py:func:`waflib.Context.Context.execute` + """ + self.init_dirs() + + self.cachedir = self.bldnode.make_node(Build.CACHE_DIR) + self.cachedir.mkdir() + + path = os.path.join(self.bldnode.abspath(), WAF_CONFIG_LOG) + self.logger = Logs.make_logger(path, 'cfg') + + app = getattr(Context.g_module, 'APPNAME', '') + if app: + ver = getattr(Context.g_module, 'VERSION', '') + if ver: + app = "%s (%s)" % (app, ver) + + params = {'now': time.ctime(), 'pyver': sys.hexversion, 'systype': sys.platform, 'args': " ".join(sys.argv), 'wafver': Context.WAFVERSION, 'abi': Context.ABI, 'app': app} + self.to_log(conf_template % params) + self.msg('Setting top to', self.srcnode.abspath()) + self.msg('Setting out to', self.bldnode.abspath()) + + if id(self.srcnode) == id(self.bldnode): + Logs.warn('Setting top == out') + elif id(self.path) != id(self.srcnode): + if self.srcnode.is_child_of(self.path): + Logs.warn('Are you certain that you do not want to set top="." ?') + + super(ConfigurationContext, self).execute() + + self.store() + + Context.top_dir = self.srcnode.abspath() + Context.out_dir = self.bldnode.abspath() + + # this will write a configure lock so that subsequent builds will + # consider the current path as the root directory (see prepare_impl). 
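+		# (a hedged note: up to three copies of the lock are written below, in
+		# run_dir, top_dir and out_dir, unless the NO_LOCK_IN_* switches disable them)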
+ # to remove: use 'waf distclean' + env = ConfigSet.ConfigSet() + env.argv = sys.argv + env.options = Options.options.__dict__ + env.config_cmd = self.cmd + + env.run_dir = Context.run_dir + env.top_dir = Context.top_dir + env.out_dir = Context.out_dir + + # conf.hash & conf.files hold wscript files paths and hash + # (used only by Configure.autoconfig) + env.hash = self.hash + env.files = self.files + env.environ = dict(self.environ) + env.launch_dir = Context.launch_dir + + if not (self.env.NO_LOCK_IN_RUN or env.environ.get('NO_LOCK_IN_RUN') or getattr(Options.options, 'no_lock_in_run')): + env.store(os.path.join(Context.run_dir, Options.lockfile)) + if not (self.env.NO_LOCK_IN_TOP or env.environ.get('NO_LOCK_IN_TOP') or getattr(Options.options, 'no_lock_in_top')): + env.store(os.path.join(Context.top_dir, Options.lockfile)) + if not (self.env.NO_LOCK_IN_OUT or env.environ.get('NO_LOCK_IN_OUT') or getattr(Options.options, 'no_lock_in_out')): + env.store(os.path.join(Context.out_dir, Options.lockfile)) + + def prepare_env(self, env): + """ + Insert *PREFIX*, *BINDIR* and *LIBDIR* values into ``env`` + + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + :param env: a ConfigSet, usually ``conf.env`` + """ + if not env.PREFIX: + if Options.options.prefix or Utils.is_win32: + env.PREFIX = Options.options.prefix + else: + env.PREFIX = '/' + if not env.BINDIR: + if Options.options.bindir: + env.BINDIR = Options.options.bindir + else: + env.BINDIR = Utils.subst_vars('${PREFIX}/bin', env) + if not env.LIBDIR: + if Options.options.libdir: + env.LIBDIR = Options.options.libdir + else: + env.LIBDIR = Utils.subst_vars('${PREFIX}/lib%s' % Utils.lib64(), env) + + def store(self): + """Save the config results into the cache file""" + n = self.cachedir.make_node('build.config.py') + n.write('version = 0x%x\ntools = %r\n' % (Context.HEXVERSION, self.tools)) + + if not self.all_envs: + self.fatal('nothing to store in the configuration context!') + + for key in self.all_envs: + tmpenv = self.all_envs[key] + tmpenv.store(os.path.join(self.cachedir.abspath(), key + Build.CACHE_SUFFIX)) + + def load(self, tool_list, tooldir=None, funs=None, with_sys_path=True, cache=False): + """ + Load Waf tools, which will be imported whenever a build is started. 
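+
+		Example (a hedged sketch)::
+
+			def configure(conf):
+				conf.load('compiler_c')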
+ + :param tool_list: waf tools to import + :type tool_list: list of string + :param tooldir: paths for the imports + :type tooldir: list of string + :param funs: functions to execute from the waf tools + :type funs: list of string + :param cache: whether to prevent the tool from running twice + :type cache: bool + """ + + tools = Utils.to_list(tool_list) + if tooldir: + tooldir = Utils.to_list(tooldir) + for tool in tools: + # avoid loading the same tool more than once with the same functions + # used by composite projects + + if cache: + mag = (tool, id(self.env), tooldir, funs) + if mag in self.tool_cache: + self.to_log('(tool %s is already loaded, skipping)' % tool) + continue + self.tool_cache.append(mag) + + module = None + try: + module = Context.load_tool(tool, tooldir, ctx=self, with_sys_path=with_sys_path) + except ImportError as e: + self.fatal('Could not load the Waf tool %r from %r\n%s' % (tool, getattr(e, 'waf_sys_path', sys.path), e)) + except Exception as e: + self.to_log('imp %r (%r & %r)' % (tool, tooldir, funs)) + self.to_log(traceback.format_exc()) + raise + + if funs is not None: + self.eval_rules(funs) + else: + func = getattr(module, 'configure', None) + if func: + if type(func) is type(Utils.readf): + func(self) + else: + self.eval_rules(func) + + self.tools.append({'tool':tool, 'tooldir':tooldir, 'funs':funs}) + + def post_recurse(self, node): + """ + Records the path and a hash of the scripts visited, see :py:meth:`waflib.Context.Context.post_recurse` + + :param node: script + :type node: :py:class:`waflib.Node.Node` + """ + super(ConfigurationContext, self).post_recurse(node) + self.hash = Utils.h_list((self.hash, node.read('rb'))) + self.files.append(node.abspath()) + + def eval_rules(self, rules): + """ + Execute configuration tests provided as list of functions to run + + :param rules: list of configuration method names + :type rules: list of string + """ + self.rules = Utils.to_list(rules) + for x in self.rules: + f = getattr(self, x) + if not f: + self.fatal('No such configuration function %r' % x) + f() + +def conf(f): + """ + Decorator: attach new configuration functions to :py:class:`waflib.Build.BuildContext` and + :py:class:`waflib.Configure.ConfigurationContext`. The methods bound will accept a parameter + named 'mandatory' to disable the configuration errors:: + + def configure(conf): + conf.find_program('abc', mandatory=False) + + :param f: method to bind + :type f: function + """ + def fun(*k, **kw): + mandatory = kw.pop('mandatory', True) + try: + return f(*k, **kw) + except Errors.ConfigurationError: + if mandatory: + raise + + fun.__name__ = f.__name__ + setattr(ConfigurationContext, f.__name__, fun) + setattr(Build.BuildContext, f.__name__, fun) + return f + +@conf +def add_os_flags(self, var, dest=None, dup=False): + """ + Import operating system environment values into ``conf.env`` dict:: + + def configure(conf): + conf.add_os_flags('CFLAGS') + + :param var: variable to use + :type var: string + :param dest: destination variable, by default the same as var + :type dest: string + :param dup: add the same set of flags again + :type dup: bool + """ + try: + flags = shlex.split(self.environ[var]) + except KeyError: + return + if dup or ''.join(flags) not in ''.join(Utils.to_list(self.env[dest or var])): + self.env.append_value(dest or var, flags) + +@conf +def cmd_to_list(self, cmd): + """ + Detect if a command is written in pseudo shell like ``ccache g++`` and return a list. 
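+
+	For example (a hedged sketch)::
+
+		conf.cmd_to_list('ccache g++')  # -> ['ccache', 'g++'] on POSIX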
+
+	:param cmd: command
+	:type cmd: a string or a list of string
+	"""
+	if isinstance(cmd, str):
+		if os.path.isfile(cmd):
+			# do not take any risk
+			return [cmd]
+		if os.sep == '/':
+			return shlex.split(cmd)
+		else:
+			try:
+				return shlex.split(cmd, posix=False)
+			except TypeError:
+				# Python 2.5 on windows?
+				return shlex.split(cmd)
+	return cmd
+
+@conf
+def check_waf_version(self, mini='1.9.99', maxi='2.1.0', **kw):
+	"""
+	Raises a configuration error if the Waf version is outside the given bounds::
+
+		conf.check_waf_version(mini='1.9.99', maxi='2.1.0')
+
+	:type mini: number, tuple or string
+	:param mini: Minimum required version
+	:type maxi: number, tuple or string
+	:param maxi: Maximum allowed version
+	"""
+	self.start_msg('Checking for waf version in %s-%s' % (str(mini), str(maxi)), **kw)
+	ver = Context.HEXVERSION
+	if Utils.num2ver(mini) > ver:
+		self.fatal('waf version should be at least %r (%r found)' % (Utils.num2ver(mini), ver))
+	if Utils.num2ver(maxi) < ver:
+		self.fatal('waf version should be at most %r (%r found)' % (Utils.num2ver(maxi), ver))
+	self.end_msg('ok', **kw)
+
+@conf
+def find_file(self, filename, path_list=[]):
+	"""
+	Finds a file in a list of paths
+
+	:param filename: name of the file to search for
+	:param path_list: list of directories to search
+	:return: the first matching filename; else a configuration exception is raised
+	"""
+	for n in Utils.to_list(filename):
+		for d in Utils.to_list(path_list):
+			p = os.path.expanduser(os.path.join(d, n))
+			if os.path.exists(p):
+				return p
+	self.fatal('Could not find %r' % filename)
+
+@conf
+def find_program(self, filename, **kw):
+	"""
+	Searches for a program on the operating system
+
+	When var is used, you may set os.environ[var] to help find a specific program version, for example::
+
+		$ CC='ccache gcc' waf configure
+
+	:param path_list: paths to use for searching
+	:type path_list: list of string
+	:param var: store the result to conf.env[var] where var defaults to filename.upper() if not provided; the result is stored as a list of strings
+	:type var: string
+	:param value: obtain the program from the value passed exclusively
+	:type value: list or string (list is preferred)
+	:param exts: list of extensions for the binary (do not add an extension for portability)
+	:type exts: list of string
+	:param msg: name to display in the log, by default filename is used
+	:type msg: string
+	:param interpreter: interpreter for the program
+	:type interpreter: ConfigSet variable key
+	:raises: :py:class:`waflib.Errors.ConfigurationError`
+	"""
+
+	exts = kw.get('exts', Utils.is_win32 and '.exe,.com,.bat,.cmd' or ',.sh,.pl,.py')
+
+	environ = kw.get('environ', getattr(self, 'environ', os.environ))
+
+	ret = ''
+
+	filename = Utils.to_list(filename)
+	msg = kw.get('msg', ', '.join(filename))
+
+	var = kw.get('var', '')
+	if not var:
+		var = re.sub(r'\W', '_', filename[0].upper())
+
+	path_list = kw.get('path_list', '')
+	if path_list:
+		path_list = Utils.to_list(path_list)
+	else:
+		path_list = environ.get('PATH', '').split(os.pathsep)
+
+	if kw.get('value'):
+		# user-provided in command-line options and passed to find_program
+		ret = self.cmd_to_list(kw['value'])
+	elif environ.get(var):
+		# user-provided in the os environment
+		ret = self.cmd_to_list(environ[var])
+	elif self.env[var]:
+		# a default option in the wscript file
+		ret = self.cmd_to_list(self.env[var])
+	else:
+		if not ret:
+			ret = self.find_binary(filename, exts.split(','), path_list)
+		if not ret and Utils.winreg:
+			ret = Utils.get_registry_app_path(Utils.winreg.HKEY_CURRENT_USER, filename)
+		if not ret and Utils.winreg:
+			ret = Utils.get_registry_app_path(Utils.winreg.HKEY_LOCAL_MACHINE, filename)
+		ret = self.cmd_to_list(ret)
+
+	if ret:
+		if len(ret) == 1:
+			retmsg = ret[0]
+		else:
+			retmsg = ret
+	else:
+		retmsg = False
+
+	self.msg('Checking for program %r' % msg, retmsg, **kw)
+	if not kw.get('quiet'):
+		self.to_log('find program=%r paths=%r var=%r -> %r' % (filename, path_list, var, ret))
+
+	if not ret:
+		self.fatal(kw.get('errmsg', '') or 'Could not find the program %r' % filename)
+
+	interpreter = kw.get('interpreter')
+	if interpreter is None:
+		if not Utils.check_exe(ret[0], env=environ):
+			self.fatal('Program %r is not executable' % ret)
+		self.env[var] = ret
+	else:
+		self.env[var] = self.env[interpreter] + ret
+
+	return ret
+
+@conf
+def find_binary(self, filenames, exts, paths):
+	for f in filenames:
+		for ext in exts:
+			exe_name = f + ext
+			if os.path.isabs(exe_name):
+				if os.path.isfile(exe_name):
+					return exe_name
+			else:
+				for path in paths:
+					x = os.path.expanduser(os.path.join(path, exe_name))
+					if os.path.isfile(x):
+						return x
+	return None
+
+@conf
+def run_build(self, *k, **kw):
+	"""
+	Creates a temporary build context to execute a build. A temporary reference to that build
+	context is kept on self.test_bld for debugging purposes.
+	The arguments to this function are passed to a single task generator for that build.
+	Only three parameters are mandatory:
+
+	:param features: features to pass to a task generator created in the build
+	:type features: list of string
+	:param compile_filename: file to create for the compilation (default: *test.c*)
+	:type compile_filename: string
+	:param code: input file contents
+	:type code: string
+
+	Though this function returns *0* by default, the build may bind an attribute named *retval* on the
+	build context object to return a particular value. See :py:func:`waflib.Tools.c_config.test_exec_fun` for an example.
+
+	Each temporary build creates a temporary folder; the name of that folder is calculated
+	by hashing the input arguments to this function, with the exception of :py:class:`waflib.ConfigSet.ConfigSet`
+	objects which are used for both reading and writing values.
+
+	This function also features a cache which is disabled by default; that cache relies
+	on the hash value calculated as indicated above::
+
+		def options(opt):
+			opt.add_option('--confcache', dest='confcache', default=0,
+				action='count', help='Use a configuration cache')
+
+	Then execute the configuration with the following command line::
+
+		$ waf configure --confcache
+
+	"""
+	buf = []
+	for key in sorted(kw.keys()):
+		v = kw[key]
+		if isinstance(v, ConfigSet.ConfigSet):
+			# values are being written to, so they are excluded from contributing to the hash
+			continue
+		elif hasattr(v, '__call__'):
+			buf.append(Utils.h_fun(v))
+		else:
+			buf.append(str(v))
+	h = Utils.h_list(buf)
+	dir = self.bldnode.abspath() + os.sep + (not Utils.is_win32 and '.'
or '') + 'conf_check_' + Utils.to_hex(h) + + cachemode = kw.get('confcache', getattr(Options.options, 'confcache', None)) + + if not cachemode and os.path.exists(dir): + shutil.rmtree(dir) + + try: + os.makedirs(dir) + except OSError: + pass + + try: + os.stat(dir) + except OSError: + self.fatal('cannot use the configuration test folder %r' % dir) + + if cachemode == 1: + try: + proj = ConfigSet.ConfigSet(os.path.join(dir, 'cache_run_build')) + except EnvironmentError: + pass + else: + ret = proj['cache_run_build'] + if isinstance(ret, str) and ret.startswith('Test does not build'): + self.fatal(ret) + return ret + + bdir = os.path.join(dir, 'testbuild') + + if not os.path.exists(bdir): + os.makedirs(bdir) + + cls_name = kw.get('run_build_cls') or getattr(self, 'run_build_cls', 'build') + self.test_bld = bld = Context.create_context(cls_name, top_dir=dir, out_dir=bdir) + bld.init_dirs() + bld.progress_bar = 0 + bld.targets = '*' + + bld.logger = self.logger + bld.all_envs.update(self.all_envs) # not really necessary + bld.env = kw['env'] + + bld.kw = kw + bld.conf = self + kw['build_fun'](bld) + ret = -1 + try: + try: + bld.compile() + except Errors.WafError: + ret = 'Test does not build: %s' % traceback.format_exc() + self.fatal(ret) + else: + ret = getattr(bld, 'retval', 0) + finally: + if cachemode: + # cache the results each time + proj = ConfigSet.ConfigSet() + proj['cache_run_build'] = ret + proj.store(os.path.join(dir, 'cache_run_build')) + else: + shutil.rmtree(dir) + return ret + +@conf +def ret_msg(self, msg, args): + if isinstance(msg, str): + return msg + return msg(args) + +@conf +def test(self, *k, **kw): + + if not 'env' in kw: + kw['env'] = self.env.derive() + + # validate_c for example + if kw.get('validate'): + kw['validate'](kw) + + self.start_msg(kw['msg'], **kw) + ret = None + try: + ret = self.run_build(*k, **kw) + except self.errors.ConfigurationError: + self.end_msg(kw['errmsg'], 'YELLOW', **kw) + if Logs.verbose > 1: + raise + else: + self.fatal('The configuration failed') + else: + kw['success'] = ret + + if kw.get('post_check'): + ret = kw['post_check'](kw) + + if ret: + self.end_msg(kw['errmsg'], 'YELLOW', **kw) + self.fatal('The configuration failed %r' % ret) + else: + self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) + return ret + diff --git a/third_party/waf/waflib/Context.py b/third_party/waf/waflib/Context.py new file mode 100644 index 0000000..3696648 --- /dev/null +++ b/third_party/waf/waflib/Context.py @@ -0,0 +1,747 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010-2018 (ita) + +""" +Classes and functions enabling the command system +""" + +import os, re, sys +from waflib import Utils, Errors, Logs +import waflib.Node + +if sys.hexversion > 0x3040000: + import types + class imp(object): + new_module = lambda x: types.ModuleType(x) +else: + import imp + +# the following 3 constants are updated on each new release (do not touch) +HEXVERSION=0x2001a00 +"""Constant updated on new releases""" + +WAFVERSION="2.0.26" +"""Constant updated on new releases""" + +WAFREVISION="0fb985ce1932c6f3e7533f435e4ee209d673776e" +"""Git revision when the waf version is updated""" + +WAFNAME="waf" +"""Application name displayed on --help""" + +ABI = 20 +"""Version of the build data cache file format (used in :py:const:`waflib.Context.DBFILE`)""" + +DBFILE = '.wafpickle-%s-%d-%d' % (sys.platform, sys.hexversion, ABI) +"""Name of the pickle file for storing the build data""" + +APPNAME = 'APPNAME' +"""Default application name (used by ``waf dist``)""" + 
+VERSION = 'VERSION' +"""Default application version (used by ``waf dist``)""" + +TOP = 'top' +"""The variable name for the top-level directory in wscript files""" + +OUT = 'out' +"""The variable name for the output directory in wscript files""" + +WSCRIPT_FILE = 'wscript' +"""Name of the waf script files""" + +launch_dir = '' +"""Directory from which waf has been called""" +run_dir = '' +"""Location of the wscript file to use as the entry point""" +top_dir = '' +"""Location of the project directory (top), if the project was configured""" +out_dir = '' +"""Location of the build directory (out), if the project was configured""" +waf_dir = '' +"""Directory containing the waf modules""" + +default_encoding = Utils.console_encoding() +"""Encoding to use when reading outputs from other processes""" + +g_module = None +""" +Module representing the top-level wscript file (see :py:const:`waflib.Context.run_dir`) +""" + +STDOUT = 1 +STDERR = -1 +BOTH = 0 + +classes = [] +""" +List of :py:class:`waflib.Context.Context` subclasses that can be used as waf commands. The classes +are added automatically by a metaclass. +""" + +def create_context(cmd_name, *k, **kw): + """ + Returns a new :py:class:`waflib.Context.Context` instance corresponding to the given command. + Used in particular by :py:func:`waflib.Scripting.run_command` + + :param cmd_name: command name + :type cmd_name: string + :param k: arguments to give to the context class initializer + :type k: list + :param k: keyword arguments to give to the context class initializer + :type k: dict + :return: Context object + :rtype: :py:class:`waflib.Context.Context` + """ + for x in classes: + if x.cmd == cmd_name: + return x(*k, **kw) + ctx = Context(*k, **kw) + ctx.fun = cmd_name + return ctx + +class store_context(type): + """ + Metaclass that registers command classes into the list :py:const:`waflib.Context.classes` + Context classes must provide an attribute 'cmd' representing the command name, and a function + attribute 'fun' representing the function name that the command uses. + """ + def __init__(cls, name, bases, dct): + super(store_context, cls).__init__(name, bases, dct) + name = cls.__name__ + + if name in ('ctx', 'Context'): + return + + try: + cls.cmd + except AttributeError: + raise Errors.WafError('Missing command for the context class %r (cmd)' % name) + + if not getattr(cls, 'fun', None): + cls.fun = cls.cmd + + classes.insert(0, cls) + +ctx = store_context('ctx', (object,), {}) +"""Base class for all :py:class:`waflib.Context.Context` classes""" + +class Context(ctx): + """ + Default context for waf commands, and base class for new command contexts. + + Context objects are passed to top-level functions:: + + def foo(ctx): + print(ctx.__class__.__name__) # waflib.Context.Context + + Subclasses must define the class attributes 'cmd' and 'fun': + + :param cmd: command to execute as in ``waf cmd`` + :type cmd: string + :param fun: function name to execute when the command is called + :type fun: string + + .. 
inheritance-diagram:: waflib.Context.Context waflib.Build.BuildContext waflib.Build.InstallContext waflib.Build.UninstallContext waflib.Build.StepContext waflib.Build.ListContext waflib.Configure.ConfigurationContext waflib.Scripting.Dist waflib.Scripting.DistCheck waflib.Build.CleanContext + :top-classes: waflib.Context.Context + """ + + errors = Errors + """ + Shortcut to :py:mod:`waflib.Errors` provided for convenience + """ + + tools = {} + """ + A module cache for wscript files; see :py:meth:`Context.Context.load` + """ + + def __init__(self, **kw): + try: + rd = kw['run_dir'] + except KeyError: + rd = run_dir + + # binds the context to the nodes in use to avoid a context singleton + self.node_class = type('Nod3', (waflib.Node.Node,), {}) + self.node_class.__module__ = 'waflib.Node' + self.node_class.ctx = self + + self.root = self.node_class('', None) + self.cur_script = None + self.path = self.root.find_dir(rd) + + self.stack_path = [] + self.exec_dict = {'ctx':self, 'conf':self, 'bld':self, 'opt':self} + self.logger = None + + def finalize(self): + """ + Called to free resources such as logger files + """ + try: + logger = self.logger + except AttributeError: + pass + else: + Logs.free_logger(logger) + delattr(self, 'logger') + + def load(self, tool_list, *k, **kw): + """ + Loads a Waf tool as a module, and try calling the function named :py:const:`waflib.Context.Context.fun` + from it. A ``tooldir`` argument may be provided as a list of module paths. + + :param tool_list: list of Waf tool names to load + :type tool_list: list of string or space-separated string + """ + tools = Utils.to_list(tool_list) + path = Utils.to_list(kw.get('tooldir', '')) + with_sys_path = kw.get('with_sys_path', True) + + for t in tools: + module = load_tool(t, path, with_sys_path=with_sys_path) + fun = getattr(module, kw.get('name', self.fun), None) + if fun: + fun(self) + + def execute(self): + """ + Here, it calls the function name in the top-level wscript file. Most subclasses + redefine this method to provide additional functionality. + """ + self.recurse([os.path.dirname(g_module.root_path)]) + + def pre_recurse(self, node): + """ + Method executed immediately before a folder is read by :py:meth:`waflib.Context.Context.recurse`. + The current script is bound as a Node object on ``self.cur_script``, and the current path + is bound to ``self.path`` + + :param node: script + :type node: :py:class:`waflib.Node.Node` + """ + self.stack_path.append(self.cur_script) + + self.cur_script = node + self.path = node.parent + + def post_recurse(self, node): + """ + Restores ``self.cur_script`` and ``self.path`` right after :py:meth:`waflib.Context.Context.recurse` terminates. + + :param node: script + :type node: :py:class:`waflib.Node.Node` + """ + self.cur_script = self.stack_path.pop() + if self.cur_script: + self.path = self.cur_script.parent + + def recurse(self, dirs, name=None, mandatory=True, once=True, encoding=None): + """ + Runs user-provided functions from the supplied list of directories. + The directories can be either absolute, or relative to the directory + of the wscript file + + The methods :py:meth:`waflib.Context.Context.pre_recurse` and + :py:meth:`waflib.Context.Context.post_recurse` are called immediately before + and after a script has been executed. 
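+
+		Example (a hedged sketch)::
+
+			def build(bld):
+				bld.recurse('src lib')  # runs the build() functions of src/wscript and lib/wscript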
+ + :param dirs: List of directories to visit + :type dirs: list of string or space-separated string + :param name: Name of function to invoke from the wscript + :type name: string + :param mandatory: whether sub wscript files are required to exist + :type mandatory: bool + :param once: read the script file once for a particular context + :type once: bool + """ + try: + cache = self.recurse_cache + except AttributeError: + cache = self.recurse_cache = {} + + for d in Utils.to_list(dirs): + + if not os.path.isabs(d): + # absolute paths only + d = os.path.join(self.path.abspath(), d) + + WSCRIPT = os.path.join(d, WSCRIPT_FILE) + WSCRIPT_FUN = WSCRIPT + '_' + (name or self.fun) + + node = self.root.find_node(WSCRIPT_FUN) + if node and (not once or node not in cache): + cache[node] = True + self.pre_recurse(node) + try: + function_code = node.read('r', encoding) + exec(compile(function_code, node.abspath(), 'exec'), self.exec_dict) + finally: + self.post_recurse(node) + elif not node: + node = self.root.find_node(WSCRIPT) + tup = (node, name or self.fun) + if node and (not once or tup not in cache): + cache[tup] = True + self.pre_recurse(node) + try: + wscript_module = load_module(node.abspath(), encoding=encoding) + user_function = getattr(wscript_module, (name or self.fun), None) + if not user_function: + if not mandatory: + continue + raise Errors.WafError('No function %r defined in %s' % (name or self.fun, node.abspath())) + user_function(self) + finally: + self.post_recurse(node) + elif not node: + if not mandatory: + continue + try: + os.listdir(d) + except OSError: + raise Errors.WafError('Cannot read the folder %r' % d) + raise Errors.WafError('No wscript file in directory %s' % d) + + def log_command(self, cmd, kw): + if Logs.verbose: + fmt = os.environ.get('WAF_CMD_FORMAT') + if fmt == 'string': + if not isinstance(cmd, str): + cmd = Utils.shell_escape(cmd) + Logs.debug('runner: %r', cmd) + Logs.debug('runner_env: kw=%s', kw) + + def exec_command(self, cmd, **kw): + """ + Runs an external process and returns the exit status:: + + def run(tsk): + ret = tsk.generator.bld.exec_command('touch foo.txt') + return ret + + If the context has the attribute 'log', then captures and logs the process stderr/stdout. + Unlike :py:meth:`waflib.Context.Context.cmd_and_log`, this method does not return the + stdout/stderr values captured. + + :param cmd: command argument for subprocess.Popen + :type cmd: string or list + :param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate. + :type kw: dict + :returns: process exit status + :rtype: integer + :raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process + :raises: :py:class:`waflib.Errors.WafError` in case of execution failure + """ + subprocess = Utils.subprocess + kw['shell'] = isinstance(cmd, str) + self.log_command(cmd, kw) + + if self.logger: + self.logger.info(cmd) + + if 'stdout' not in kw: + kw['stdout'] = subprocess.PIPE + if 'stderr' not in kw: + kw['stderr'] = subprocess.PIPE + + if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]): + raise Errors.WafError('Program %s not found!' 
% cmd[0]) + + cargs = {} + if 'timeout' in kw: + if sys.hexversion >= 0x3030000: + cargs['timeout'] = kw['timeout'] + if not 'start_new_session' in kw: + kw['start_new_session'] = True + del kw['timeout'] + if 'input' in kw: + if kw['input']: + cargs['input'] = kw['input'] + kw['stdin'] = subprocess.PIPE + del kw['input'] + + if 'cwd' in kw: + if not isinstance(kw['cwd'], str): + kw['cwd'] = kw['cwd'].abspath() + + encoding = kw.pop('decode_as', default_encoding) + + try: + ret, out, err = Utils.run_process(cmd, kw, cargs) + except Exception as e: + raise Errors.WafError('Execution failure: %s' % str(e), ex=e) + + if out: + if not isinstance(out, str): + out = out.decode(encoding, errors='replace') + if self.logger: + self.logger.debug('out: %s', out) + else: + Logs.info(out, extra={'stream':sys.stdout, 'c1': ''}) + if err: + if not isinstance(err, str): + err = err.decode(encoding, errors='replace') + if self.logger: + self.logger.error('err: %s' % err) + else: + Logs.info(err, extra={'stream':sys.stderr, 'c1': ''}) + + return ret + + def cmd_and_log(self, cmd, **kw): + """ + Executes a process and returns stdout/stderr if the execution is successful. + An exception is thrown when the exit status is non-0. In that case, both stderr and stdout + will be bound to the WafError object (configuration tests):: + + def configure(conf): + out = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.STDOUT, quiet=waflib.Context.BOTH) + (out, err) = conf.cmd_and_log(['echo', 'hello'], output=waflib.Context.BOTH) + (out, err) = conf.cmd_and_log(cmd, input='\\n'.encode(), output=waflib.Context.STDOUT) + try: + conf.cmd_and_log(['which', 'someapp'], output=waflib.Context.BOTH) + except Errors.WafError as e: + print(e.stdout, e.stderr) + + :param cmd: args for subprocess.Popen + :type cmd: list or string + :param kw: keyword arguments for subprocess.Popen. The parameters input/timeout will be passed to wait/communicate. + :type kw: dict + :returns: a tuple containing the contents of stdout and stderr + :rtype: string + :raises: :py:class:`waflib.Errors.WafError` if an invalid executable is specified for a non-shell process + :raises: :py:class:`waflib.Errors.WafError` in case of execution failure; stdout/stderr/returncode are bound to the exception object + """ + subprocess = Utils.subprocess + kw['shell'] = isinstance(cmd, str) + self.log_command(cmd, kw) + + quiet = kw.pop('quiet', None) + to_ret = kw.pop('output', STDOUT) + + if Logs.verbose and not kw['shell'] and not Utils.check_exe(cmd[0]): + raise Errors.WafError('Program %r not found!' 
% cmd[0]) + + kw['stdout'] = kw['stderr'] = subprocess.PIPE + if quiet is None: + self.to_log(cmd) + + cargs = {} + if 'timeout' in kw: + if sys.hexversion >= 0x3030000: + cargs['timeout'] = kw['timeout'] + if not 'start_new_session' in kw: + kw['start_new_session'] = True + del kw['timeout'] + if 'input' in kw: + if kw['input']: + cargs['input'] = kw['input'] + kw['stdin'] = subprocess.PIPE + del kw['input'] + + if 'cwd' in kw: + if not isinstance(kw['cwd'], str): + kw['cwd'] = kw['cwd'].abspath() + + encoding = kw.pop('decode_as', default_encoding) + + try: + ret, out, err = Utils.run_process(cmd, kw, cargs) + except Exception as e: + raise Errors.WafError('Execution failure: %s' % str(e), ex=e) + + if not isinstance(out, str): + out = out.decode(encoding, errors='replace') + if not isinstance(err, str): + err = err.decode(encoding, errors='replace') + + if out and quiet != STDOUT and quiet != BOTH: + self.to_log('out: %s' % out) + if err and quiet != STDERR and quiet != BOTH: + self.to_log('err: %s' % err) + + if ret: + e = Errors.WafError('Command %r returned %r' % (cmd, ret)) + e.returncode = ret + e.stderr = err + e.stdout = out + raise e + + if to_ret == BOTH: + return (out, err) + elif to_ret == STDERR: + return err + return out + + def fatal(self, msg, ex=None): + """ + Prints an error message in red and stops command execution; this is + usually used in the configuration section:: + + def configure(conf): + conf.fatal('a requirement is missing') + + :param msg: message to display + :type msg: string + :param ex: optional exception object + :type ex: exception + :raises: :py:class:`waflib.Errors.ConfigurationError` + """ + if self.logger: + self.logger.info('from %s: %s' % (self.path.abspath(), msg)) + try: + logfile = self.logger.handlers[0].baseFilename + except AttributeError: + pass + else: + if os.environ.get('WAF_PRINT_FAILURE_LOG'): + # see #1930 + msg = 'Log from (%s):\n%s\n' % (logfile, Utils.readf(logfile)) + else: + msg = '%s\n(complete log in %s)' % (msg, logfile) + raise self.errors.ConfigurationError(msg, ex=ex) + + def to_log(self, msg): + """ + Logs information to the logger (if present), or to stderr. + Empty messages are not printed:: + + def build(bld): + bld.to_log('starting the build') + + Provide a logger on the context class or override this method if necessary. + + :param msg: message + :type msg: string + """ + if not msg: + return + if self.logger: + self.logger.info(msg) + else: + sys.stderr.write(str(msg)) + sys.stderr.flush() + + + def msg(self, *k, **kw): + """ + Prints a configuration message of the form ``msg: result``. + The second part of the message will be in colors. The output + can be disabled easily by setting ``in_msg`` to a positive value:: + + def configure(conf): + self.in_msg = 1 + conf.msg('Checking for library foo', 'ok') + # no output + + :param msg: message to display to the user + :type msg: string + :param result: result to display + :type result: string or boolean + :param color: color to use, see :py:const:`waflib.Logs.colors_lst` + :type color: string + """ + try: + msg = kw['msg'] + except KeyError: + msg = k[0] + + self.start_msg(msg, **kw) + + try: + result = kw['result'] + except KeyError: + result = k[1] + + color = kw.get('color') + if not isinstance(color, str): + color = result and 'GREEN' or 'YELLOW' + + self.end_msg(result, color, **kw) + + def start_msg(self, *k, **kw): + """ + Prints the beginning of a 'Checking for xxx' message. 
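+
+		Example (a hedged sketch)::
+
+			conf.start_msg('Checking for a setting')
+			conf.end_msg('ok')
+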
See :py:meth:`waflib.Context.Context.msg` + """ + if kw.get('quiet'): + return + + msg = kw.get('msg') or k[0] + try: + if self.in_msg: + self.in_msg += 1 + return + except AttributeError: + self.in_msg = 0 + self.in_msg += 1 + + try: + self.line_just = max(self.line_just, len(msg)) + except AttributeError: + self.line_just = max(40, len(msg)) + for x in (self.line_just * '-', msg): + self.to_log(x) + Logs.pprint('NORMAL', "%s :" % msg.ljust(self.line_just), sep='') + + def end_msg(self, *k, **kw): + """Prints the end of a 'Checking for' message. See :py:meth:`waflib.Context.Context.msg`""" + if kw.get('quiet'): + return + self.in_msg -= 1 + if self.in_msg: + return + + result = kw.get('result') or k[0] + + defcolor = 'GREEN' + if result is True: + msg = 'ok' + elif not result: + msg = 'not found' + defcolor = 'YELLOW' + else: + msg = str(result) + + self.to_log(msg) + try: + color = kw['color'] + except KeyError: + if len(k) > 1 and k[1] in Logs.colors_lst: + # compatibility waf 1.7 + color = k[1] + else: + color = defcolor + Logs.pprint(color, msg) + + def load_special_tools(self, var, ban=[]): + """ + Loads third-party extensions modules for certain programming languages + by trying to list certain files in the extras/ directory. This method + is typically called once for a programming language group, see for + example :py:mod:`waflib.Tools.compiler_c` + + :param var: glob expression, for example 'cxx\\_\\*.py' + :type var: string + :param ban: list of exact file names to exclude + :type ban: list of string + """ + if os.path.isdir(waf_dir): + lst = self.root.find_node(waf_dir).find_node('waflib/extras').ant_glob(var) + for x in lst: + if not x.name in ban: + load_tool(x.name.replace('.py', '')) + else: + from zipfile import PyZipFile + waflibs = PyZipFile(waf_dir) + lst = waflibs.namelist() + for x in lst: + if not re.match('waflib/extras/%s' % var.replace('*', '.*'), var): + continue + f = os.path.basename(x) + doban = False + for b in ban: + r = b.replace('*', '.*') + if re.match(r, f): + doban = True + if not doban: + f = f.replace('.py', '') + load_tool(f) + +cache_modules = {} +""" +Dictionary holding already loaded modules (wscript), indexed by their absolute path. +The modules are added automatically by :py:func:`waflib.Context.load_module` +""" + +def load_module(path, encoding=None): + """ + Loads a wscript file as a python module. 
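+
+	Example (a hedged sketch; the path is hypothetical)::
+
+		mod = load_module('/project/wscript')
+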
This method caches results in :py:attr:`waflib.Context.cache_modules` + + :param path: file path + :type path: string + :return: Loaded Python module + :rtype: module + """ + try: + return cache_modules[path] + except KeyError: + pass + + module = imp.new_module(WSCRIPT_FILE) + try: + code = Utils.readf(path, m='r', encoding=encoding) + except EnvironmentError: + raise Errors.WafError('Could not read the file %r' % path) + + module_dir = os.path.dirname(path) + sys.path.insert(0, module_dir) + try: + exec(compile(code, path, 'exec'), module.__dict__) + finally: + sys.path.remove(module_dir) + + cache_modules[path] = module + return module + +def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True): + """ + Imports a Waf tool as a python module, and stores it in the dict :py:const:`waflib.Context.Context.tools` + + :type tool: string + :param tool: Name of the tool + :type tooldir: list + :param tooldir: List of directories to search for the tool module + :type with_sys_path: boolean + :param with_sys_path: whether or not to search the regular sys.path, besides waf_dir and potentially given tooldirs + """ + if tool == 'java': + tool = 'javaw' # jython + else: + tool = tool.replace('++', 'xx') + + if not with_sys_path: + back_path = sys.path + sys.path = [] + try: + if tooldir: + assert isinstance(tooldir, list) + sys.path = tooldir + sys.path + try: + __import__(tool) + except ImportError as e: + e.waf_sys_path = list(sys.path) + raise + finally: + for d in tooldir: + sys.path.remove(d) + ret = sys.modules[tool] + Context.tools[tool] = ret + return ret + else: + if not with_sys_path: + sys.path.insert(0, waf_dir) + try: + for x in ('waflib.Tools.%s', 'waflib.extras.%s', 'waflib.%s', '%s'): + try: + __import__(x % tool) + break + except ImportError: + x = None + else: # raise an exception + __import__(tool) + except ImportError as e: + e.waf_sys_path = list(sys.path) + raise + finally: + if not with_sys_path: + sys.path.remove(waf_dir) + ret = sys.modules[x % tool] + Context.tools[tool] = ret + return ret + finally: + if not with_sys_path: + sys.path += back_path + diff --git a/third_party/waf/waflib/Errors.py b/third_party/waf/waflib/Errors.py new file mode 100644 index 0000000..bf75c1b --- /dev/null +++ b/third_party/waf/waflib/Errors.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010-2018 (ita) + +""" +Exceptions used in the Waf code +""" + +import traceback, sys + +class WafError(Exception): + """Base class for all Waf errors""" + def __init__(self, msg='', ex=None): + """ + :param msg: error message + :type msg: string + :param ex: exception causing this error (optional) + :type ex: exception + """ + Exception.__init__(self) + self.msg = msg + assert not isinstance(msg, Exception) + + self.stack = [] + if ex: + if not msg: + self.msg = str(ex) + if isinstance(ex, WafError): + self.stack = ex.stack + else: + self.stack = traceback.extract_tb(sys.exc_info()[2]) + self.stack += traceback.extract_stack()[:-1] + self.verbose_msg = ''.join(traceback.format_list(self.stack)) + + def __str__(self): + return str(self.msg) + +class BuildError(WafError): + """Error raised during the build and install phases""" + def __init__(self, error_tasks=[]): + """ + :param error_tasks: tasks that could not complete normally + :type error_tasks: list of task objects + """ + self.tasks = error_tasks + WafError.__init__(self, self.format_error()) + + def format_error(self): + """Formats the error messages from the tasks that failed""" + lst = ['Build failed'] + for tsk in 
self.tasks: + txt = tsk.format_error() + if txt: + lst.append(txt) + return '\n'.join(lst) + +class ConfigurationError(WafError): + """Configuration exception raised in particular by :py:meth:`waflib.Context.Context.fatal`""" + pass + +class TaskRescan(WafError): + """Task-specific exception type signalling required signature recalculations""" + pass + +class TaskNotReady(WafError): + """Task-specific exception type signalling that task signatures cannot be computed""" + pass + diff --git a/third_party/waf/waflib/Logs.py b/third_party/waf/waflib/Logs.py new file mode 100644 index 0000000..298411d --- /dev/null +++ b/third_party/waf/waflib/Logs.py @@ -0,0 +1,382 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +logging, colors, terminal width and pretty-print +""" + +import os, re, traceback, sys +from waflib import Utils, ansiterm + +if not os.environ.get('NOSYNC', False): + # synchronized output is nearly mandatory to prevent garbled output + if sys.stdout.isatty() and id(sys.stdout) == id(sys.__stdout__): + sys.stdout = ansiterm.AnsiTerm(sys.stdout) + if sys.stderr.isatty() and id(sys.stderr) == id(sys.__stderr__): + sys.stderr = ansiterm.AnsiTerm(sys.stderr) + +# import the logging module after since it holds a reference on sys.stderr +# in case someone uses the root logger +import logging + +LOG_FORMAT = os.environ.get('WAF_LOG_FORMAT', '%(asctime)s %(c1)s%(zone)s%(c2)s %(message)s') +HOUR_FORMAT = os.environ.get('WAF_HOUR_FORMAT', '%H:%M:%S') + +zones = [] +""" +See :py:class:`waflib.Logs.log_filter` +""" + +verbose = 0 +""" +Global verbosity level, see :py:func:`waflib.Logs.debug` and :py:func:`waflib.Logs.error` +""" + +colors_lst = { +'USE' : True, +'BOLD' :'\x1b[01;1m', +'RED' :'\x1b[01;31m', +'GREEN' :'\x1b[32m', +'YELLOW':'\x1b[33m', +'PINK' :'\x1b[35m', +'BLUE' :'\x1b[01;34m', +'CYAN' :'\x1b[36m', +'GREY' :'\x1b[37m', +'NORMAL':'\x1b[0m', +'cursor_on' :'\x1b[?25h', +'cursor_off' :'\x1b[?25l', +} + +indicator = '\r\x1b[K%s%s%s' + +try: + unicode +except NameError: + unicode = None + +def enable_colors(use): + """ + If *1* is given, then the system will perform a few verifications + before enabling colors, such as checking whether the interpreter + is running in a terminal. A value of zero will disable colors, + and a value above *1* will force colors. + + :param use: whether to enable colors or not + :type use: integer + """ + if use == 1: + if not (sys.stderr.isatty() or sys.stdout.isatty()): + use = 0 + if Utils.is_win32 and os.name != 'java': + term = os.environ.get('TERM', '') # has ansiterm + else: + term = os.environ.get('TERM', 'dumb') + + if term in ('dumb', 'emacs'): + use = 0 + + if use >= 1: + os.environ['TERM'] = 'vt100' + + colors_lst['USE'] = use + +# If console packages are available, replace the dummy function with a real +# implementation +try: + get_term_cols = ansiterm.get_term_cols +except AttributeError: + def get_term_cols(): + return 80 + +get_term_cols.__doc__ = """ + Returns the console width in characters. + + :return: the number of characters per line + :rtype: int + """ + +def get_color(cl): + """ + Returns the ansi sequence corresponding to the given color name. + An empty string is returned when coloring is globally disabled. 
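+
+	For example (a hedged sketch)::
+
+		Logs.get_color('RED')  # ANSI escape sequence when colors are enabled, '' otherwise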
+ + :param cl: color name in capital letters + :type cl: string + """ + if colors_lst['USE']: + return colors_lst.get(cl, '') + return '' + +class color_dict(object): + """attribute-based color access, eg: colors.PINK""" + def __getattr__(self, a): + return get_color(a) + def __call__(self, a): + return get_color(a) + +colors = color_dict() + +re_log = re.compile(r'(\w+): (.*)', re.M) +class log_filter(logging.Filter): + """ + Waf logs are of the form 'name: message', and can be filtered by 'waf --zones=name'. + For example, the following:: + + from waflib import Logs + Logs.debug('test: here is a message') + + Will be displayed only when executing:: + + $ waf --zones=test + """ + def __init__(self, name=''): + logging.Filter.__init__(self, name) + + def filter(self, rec): + """ + Filters log records by zone and by logging level + + :param rec: log entry + """ + rec.zone = rec.module + if rec.levelno >= logging.INFO: + return True + + m = re_log.match(rec.msg) + if m: + rec.zone = m.group(1) + rec.msg = m.group(2) + + if zones: + return getattr(rec, 'zone', '') in zones or '*' in zones + elif not verbose > 2: + return False + return True + +class log_handler(logging.StreamHandler): + """Dispatches messages to stderr/stdout depending on the severity level""" + def emit(self, record): + """ + Delegates the functionality to :py:meth:`waflib.Log.log_handler.emit_override` + """ + # default implementation + try: + try: + self.stream = record.stream + except AttributeError: + if record.levelno >= logging.WARNING: + record.stream = self.stream = sys.stderr + else: + record.stream = self.stream = sys.stdout + self.emit_override(record) + self.flush() + except (KeyboardInterrupt, SystemExit): + raise + except: # from the python library -_- + self.handleError(record) + + def emit_override(self, record, **kw): + """ + Writes the log record to the desired stream (stderr/stdout) + """ + self.terminator = getattr(record, 'terminator', '\n') + stream = self.stream + if unicode: + # python2 + msg = self.formatter.format(record) + fs = '%s' + self.terminator + try: + if (isinstance(msg, unicode) and getattr(stream, 'encoding', None)): + fs = fs.decode(stream.encoding) + try: + stream.write(fs % msg) + except UnicodeEncodeError: + stream.write((fs % msg).encode(stream.encoding)) + else: + stream.write(fs % msg) + except UnicodeError: + stream.write((fs % msg).encode('utf-8')) + else: + logging.StreamHandler.emit(self, record) + +class formatter(logging.Formatter): + """Simple log formatter which handles colors""" + def __init__(self): + logging.Formatter.__init__(self, LOG_FORMAT, HOUR_FORMAT) + + def format(self, rec): + """ + Formats records and adds colors as needed. The records do not get + a leading hour format if the logging level is above *INFO*. 
+ """ + try: + msg = rec.msg.decode('utf-8') + except Exception: + msg = rec.msg + + use = colors_lst['USE'] + if (use == 1 and rec.stream.isatty()) or use == 2: + + c1 = getattr(rec, 'c1', None) + if c1 is None: + c1 = '' + if rec.levelno >= logging.ERROR: + c1 = colors.RED + elif rec.levelno >= logging.WARNING: + c1 = colors.YELLOW + elif rec.levelno >= logging.INFO: + c1 = colors.GREEN + c2 = getattr(rec, 'c2', colors.NORMAL) + msg = '%s%s%s' % (c1, msg, c2) + else: + # remove single \r that make long lines in text files + # and other terminal commands + msg = re.sub(r'\r(?!\n)|\x1B\[(K|.*?(m|h|l))', '', msg) + + if rec.levelno >= logging.INFO: + # the goal of this is to format without the leading "Logs, hour" prefix + if rec.args: + try: + return msg % rec.args + except UnicodeDecodeError: + return msg.encode('utf-8') % rec.args + return msg + + rec.msg = msg + rec.c1 = colors.PINK + rec.c2 = colors.NORMAL + return logging.Formatter.format(self, rec) + +log = None +"""global logger for Logs.debug, Logs.error, etc""" + +def debug(*k, **kw): + """ + Wraps logging.debug and discards messages if the verbosity level :py:attr:`waflib.Logs.verbose` ≤ 0 + """ + if verbose: + k = list(k) + k[0] = k[0].replace('\n', ' ') + log.debug(*k, **kw) + +def error(*k, **kw): + """ + Wrap logging.errors, adds the stack trace when the verbosity level :py:attr:`waflib.Logs.verbose` ≥ 2 + """ + log.error(*k, **kw) + if verbose > 2: + st = traceback.extract_stack() + if st: + st = st[:-1] + buf = [] + for filename, lineno, name, line in st: + buf.append(' File %r, line %d, in %s' % (filename, lineno, name)) + if line: + buf.append(' %s' % line.strip()) + if buf: + log.error('\n'.join(buf)) + +def warn(*k, **kw): + """ + Wraps logging.warning + """ + log.warning(*k, **kw) + +def info(*k, **kw): + """ + Wraps logging.info + """ + log.info(*k, **kw) + +def init_log(): + """ + Initializes the logger :py:attr:`waflib.Logs.log` + """ + global log + log = logging.getLogger('waflib') + log.handlers = [] + log.filters = [] + hdlr = log_handler() + hdlr.setFormatter(formatter()) + log.addHandler(hdlr) + log.addFilter(log_filter()) + log.setLevel(logging.DEBUG) + +def make_logger(path, name): + """ + Creates a simple logger, which is often used to redirect the context command output:: + + from waflib import Logs + bld.logger = Logs.make_logger('test.log', 'build') + bld.check(header_name='sadlib.h', features='cxx cprogram', mandatory=False) + + # have the file closed immediately + Logs.free_logger(bld.logger) + + # stop logging + bld.logger = None + + The method finalize() of the command will try to free the logger, if any + + :param path: file name to write the log output to + :type path: string + :param name: logger name (loggers are reused) + :type name: string + """ + logger = logging.getLogger(name) + if sys.hexversion > 0x3000000: + encoding = sys.stdout.encoding + else: + encoding = None + hdlr = logging.FileHandler(path, 'w', encoding=encoding) + formatter = logging.Formatter('%(message)s') + hdlr.setFormatter(formatter) + logger.addHandler(hdlr) + logger.setLevel(logging.DEBUG) + return logger + +def make_mem_logger(name, to_log, size=8192): + """ + Creates a memory logger to avoid writing concurrently to the main logger + """ + from logging.handlers import MemoryHandler + logger = logging.getLogger(name) + hdlr = MemoryHandler(size, target=to_log) + formatter = logging.Formatter('%(message)s') + hdlr.setFormatter(formatter) + logger.addHandler(hdlr) + logger.memhandler = hdlr + logger.setLevel(logging.DEBUG) + 
return logger + +def free_logger(logger): + """ + Frees the resources held by the loggers created through make_logger or make_mem_logger. + This is used for file cleanup and for handler removal (logger objects are re-used). + """ + try: + for x in logger.handlers: + x.close() + logger.removeHandler(x) + except Exception: + pass + +def pprint(col, msg, label='', sep='\n'): + """ + Prints messages in color immediately on stderr:: + + from waflib import Logs + Logs.pprint('RED', 'Something bad just happened') + + :param col: color name to use in :py:const:`Logs.colors_lst` + :type col: string + :param msg: message to display + :type msg: string or a value that can be printed by %s + :param label: a message to add after the colored output + :type label: string + :param sep: a string to append at the end (line separator) + :type sep: string + """ + info('%s%s%s %s', colors(col), msg, colors.NORMAL, label, extra={'terminator':sep}) + diff --git a/third_party/waf/waflib/Node.py b/third_party/waf/waflib/Node.py new file mode 100644 index 0000000..2ad1846 --- /dev/null +++ b/third_party/waf/waflib/Node.py @@ -0,0 +1,969 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Node: filesystem structure + +#. Each file/folder is represented by exactly one node. + +#. Some potential class properties are stored on :py:class:`waflib.Build.BuildContext` : nodes to depend on, etc. + Unused class members can increase the `.wafpickle` file size sensibly. + +#. Node objects should never be created directly, use + the methods :py:func:`Node.make_node` or :py:func:`Node.find_node` for the low-level operations + +#. The methods :py:func:`Node.find_resource`, :py:func:`Node.find_dir` :py:func:`Node.find_or_declare` must be + used when a build context is present + +#. Each instance of :py:class:`waflib.Context.Context` has a unique :py:class:`Node` subclass required for serialization. + (:py:class:`waflib.Node.Nod3`, see the :py:class:`waflib.Context.Context` initializer). 
A reference to the context + owning a node is held as *self.ctx* +""" + +import os, re, sys, shutil +from waflib import Utils, Errors + +exclude_regs = ''' +**/*~ +**/#*# +**/.#* +**/%*% +**/._* +**/*.swp +**/CVS +**/CVS/** +**/.cvsignore +**/SCCS +**/SCCS/** +**/vssver.scc +**/.svn +**/.svn/** +**/BitKeeper +**/.git +**/.git/** +**/.gitignore +**/.bzr +**/.bzrignore +**/.bzr/** +**/.hg +**/.hg/** +**/_MTN +**/_MTN/** +**/.arch-ids +**/{arch} +**/_darcs +**/_darcs/** +**/.intlcache +**/.DS_Store''' +""" +Ant patterns for files and folders to exclude while doing the +recursive traversal in :py:meth:`waflib.Node.Node.ant_glob` +""" + +def ant_matcher(s, ignorecase): + reflags = re.I if ignorecase else 0 + ret = [] + for x in Utils.to_list(s): + x = x.replace('\\', '/').replace('//', '/') + if x.endswith('/'): + x += '**' + accu = [] + for k in x.split('/'): + if k == '**': + accu.append(k) + else: + k = k.replace('.', '[.]').replace('*', '.*').replace('?', '.').replace('+', '\\+') + k = '^%s$' % k + try: + exp = re.compile(k, flags=reflags) + except Exception as e: + raise Errors.WafError('Invalid pattern: %s' % k, e) + else: + accu.append(exp) + ret.append(accu) + return ret + +def ant_sub_filter(name, nn): + ret = [] + for lst in nn: + if not lst: + pass + elif lst[0] == '**': + ret.append(lst) + if len(lst) > 1: + if lst[1].match(name): + ret.append(lst[2:]) + else: + ret.append([]) + elif lst[0].match(name): + ret.append(lst[1:]) + return ret + +def ant_sub_matcher(name, pats): + nacc = ant_sub_filter(name, pats[0]) + nrej = ant_sub_filter(name, pats[1]) + if [] in nrej: + nacc = [] + return [nacc, nrej] + +class Node(object): + """ + This class is organized in two parts: + + * The basic methods meant for filesystem access (compute paths, create folders, etc) + * The methods bound to a :py:class:`waflib.Build.BuildContext` (require ``bld.srcnode`` and ``bld.bldnode``) + """ + + dict_class = dict + """ + Subclasses can provide a dict class to enable case insensitivity for example. + """ + + __slots__ = ('name', 'parent', 'children', 'cache_abspath', 'cache_isdir') + def __init__(self, name, parent): + """ + .. 
note:: Use :py:func:`Node.make_node` or :py:func:`Node.find_node` instead of calling this constructor + """ + self.name = name + self.parent = parent + if parent: + if name in parent.children: + raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent)) + parent.children[name] = self + + def __setstate__(self, data): + "Deserializes node information, used for persistence" + self.name = data[0] + self.parent = data[1] + if data[2] is not None: + # Issue 1480 + self.children = self.dict_class(data[2]) + + def __getstate__(self): + "Serializes node information, used for persistence" + return (self.name, self.parent, getattr(self, 'children', None)) + + def __str__(self): + """ + String representation (abspath), for debugging purposes + + :rtype: string + """ + return self.abspath() + + def __repr__(self): + """ + String representation (abspath), for debugging purposes + + :rtype: string + """ + return self.abspath() + + def __copy__(self): + """ + Provided to prevent nodes from being copied + + :raises: :py:class:`waflib.Errors.WafError` + """ + raise Errors.WafError('nodes are not supposed to be copied') + + def read(self, flags='r', encoding='latin-1'): + """ + Reads and returns the contents of the file represented by this node, see :py:func:`waflib.Utils.readf`:: + + def build(bld): + bld.path.find_node('wscript').read() + + :param flags: Open mode + :type flags: string + :param encoding: encoding value for Python3 + :type encoding: string + :rtype: string or bytes + :return: File contents + """ + return Utils.readf(self.abspath(), flags, encoding) + + def write(self, data, flags='w', encoding='latin-1'): + """ + Writes data to the file represented by this node, see :py:func:`waflib.Utils.writef`:: + + def build(bld): + bld.path.make_node('foo.txt').write('Hello, world!') + + :param data: data to write + :type data: string + :param flags: Write mode + :type flags: string + :param encoding: encoding value for Python3 + :type encoding: string + """ + Utils.writef(self.abspath(), data, flags, encoding) + + def read_json(self, convert=True, encoding='utf-8'): + """ + Reads and parses the contents of this node as JSON (Python ≥ 2.6):: + + def build(bld): + bld.path.find_node('abc.json').read_json() + + Note that this by default automatically decodes unicode strings on Python2, unlike what the Python JSON module does. + + :type convert: boolean + :param convert: Prevents decoding of unicode strings on Python2 + :type encoding: string + :param encoding: The encoding of the file to read. 
This defaults to UTF-8 as per the JSON standard + :rtype: object + :return: Parsed file contents + """ + import json # Python 2.6 and up + object_pairs_hook = None + if convert and sys.hexversion < 0x3000000: + try: + _type = unicode + except NameError: + _type = str + + def convert(value): + if isinstance(value, list): + return [convert(element) for element in value] + elif isinstance(value, _type): + return str(value) + else: + return value + + def object_pairs(pairs): + return dict((str(pair[0]), convert(pair[1])) for pair in pairs) + + object_pairs_hook = object_pairs + + return json.loads(self.read(encoding=encoding), object_pairs_hook=object_pairs_hook) + + def write_json(self, data, pretty=True): + """ + Writes a python object as JSON to disk (Python ≥ 2.6) as UTF-8 data (JSON standard):: + + def build(bld): + bld.path.find_node('xyz.json').write_json(199) + + :type data: object + :param data: The data to write to disk + :type pretty: boolean + :param pretty: Determines if the JSON will be nicely space separated + """ + import json # Python 2.6 and up + indent = 2 + separators = (',', ': ') + sort_keys = pretty + newline = os.linesep + if not pretty: + indent = None + separators = (',', ':') + newline = '' + output = json.dumps(data, indent=indent, separators=separators, sort_keys=sort_keys) + newline + self.write(output, encoding='utf-8') + + def exists(self): + """ + Returns whether the Node is present on the filesystem + + :rtype: bool + """ + return os.path.exists(self.abspath()) + + def isdir(self): + """ + Returns whether the Node represents a folder + + :rtype: bool + """ + return os.path.isdir(self.abspath()) + + def chmod(self, val): + """ + Changes the file/dir permissions:: + + def build(bld): + bld.path.chmod(493) # 0755 + """ + os.chmod(self.abspath(), val) + + def delete(self, evict=True): + """ + Removes the file/folder from the filesystem (equivalent to `rm -rf`), and removes this object from the Node tree. + Do not use this object after calling this method. + """ + try: + try: + if os.path.isdir(self.abspath()): + shutil.rmtree(self.abspath()) + else: + os.remove(self.abspath()) + except OSError: + if os.path.exists(self.abspath()): + raise + finally: + if evict: + self.evict() + + def evict(self): + """ + Removes this node from the Node tree + """ + del self.parent.children[self.name] + + def suffix(self): + """ + Returns the file's rightmost extension, for example `a.b.c.d → .d` + + :rtype: string + """ + k = max(0, self.name.rfind('.')) + return self.name[k:] + + def height(self): + """ + Returns the depth in the folder hierarchy from the filesystem root or from all the file drives + + :returns: filesystem depth + :rtype: integer + """ + d = self + val = -1 + while d: + d = d.parent + val += 1 + return val + + def listdir(self): + """ + Lists the folder contents + + :returns: list of file/folder names ordered alphabetically + :rtype: list of string + """ + lst = Utils.listdir(self.abspath()) + lst.sort() + return lst + + def mkdir(self): + """ + Creates a folder represented by this node. Intermediate folders are created as needed.
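+ + For instance, a rule that emits generated headers can rely on it; a minimal + sketch (the paths and file content are hypothetical):: + + def build(bld): + out = bld.path.get_bld().make_node(['gen', 'include']) + out.mkdir() # creates gen/include below the build dir, plus intermediates + out.make_node('version.h').write('#define APP_VERSION "1.0"')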
+ + :raises: :py:class:`waflib.Errors.WafError` when the folder cannot be created + """ + if self.isdir(): + return + + try: + self.parent.mkdir() + except OSError: + pass + + if self.name: + try: + os.makedirs(self.abspath()) + except OSError: + pass + + if not self.isdir(): + raise Errors.WafError('Could not create the directory %r' % self) + + try: + self.children + except AttributeError: + self.children = self.dict_class() + + def find_node(self, lst): + """ + Finds a node on the file system (files or folders), and creates the corresponding Node object if it exists + + :param lst: relative path + :type lst: string or list of string + :returns: The corresponding Node object or None if no entry was found on the filesystem + :rtype: :py:class:`waflib.Node.Node` + """ + + if isinstance(lst, str): + lst = [x for x in Utils.split_path(lst) if x and x != '.'] + + if lst and lst[0].startswith('\\\\') and not self.parent: + node = self.ctx.root.make_node(lst[0]) + node.cache_isdir = True + return node.find_node(lst[1:]) + + cur = self + for x in lst: + if x == '..': + cur = cur.parent or cur + continue + + try: + ch = cur.children + except AttributeError: + cur.children = self.dict_class() + else: + try: + cur = ch[x] + continue + except KeyError: + pass + + # optimistic: create the node first then look if it was correct to do so + cur = self.__class__(x, cur) + if not cur.exists(): + cur.evict() + return None + + if not cur.exists(): + cur.evict() + return None + + return cur + + def make_node(self, lst): + """ + Returns or creates a Node object corresponding to the input path without considering the filesystem. + + :param lst: relative path + :type lst: string or list of string + :rtype: :py:class:`waflib.Node.Node` + """ + if isinstance(lst, str): + lst = [x for x in Utils.split_path(lst) if x and x != '.'] + + cur = self + for x in lst: + if x == '..': + cur = cur.parent or cur + continue + + try: + cur = cur.children[x] + except AttributeError: + cur.children = self.dict_class() + except KeyError: + pass + else: + continue + cur = self.__class__(x, cur) + return cur + + def search_node(self, lst): + """ + Returns a Node previously defined in the data structure. The filesystem is not considered. + + :param lst: relative path + :type lst: string or list of string + :rtype: :py:class:`waflib.Node.Node` or None if there is no entry in the Node datastructure + """ + if isinstance(lst, str): + lst = [x for x in Utils.split_path(lst) if x and x != '.'] + + cur = self + for x in lst: + if x == '..': + cur = cur.parent or cur + else: + try: + cur = cur.children[x] + except (AttributeError, KeyError): + return None + return cur + + def path_from(self, node): + """ + Path of this node seen from the other:: + + def build(bld): + n1 = bld.path.find_node('foo/bar/xyz.txt') + n2 = bld.path.find_node('foo/stuff/') + n1.path_from(n2) # '../bar/xyz.txt' + + :param node: path to use as a reference + :type node: :py:class:`waflib.Node.Node` + :returns: a relative path or an absolute one if that is better + :rtype: string + """ + c1 = self + c2 = node + + c1h = c1.height() + c2h = c2.height() + + lst = [] + up = 0 + + while c1h > c2h: + lst.append(c1.name) + c1 = c1.parent + c1h -= 1 + + while c2h > c1h: + up += 1 + c2 = c2.parent + c2h -= 1 + + while c1 is not c2: + lst.append(c1.name) + up += 1 + + c1 = c1.parent + c2 = c2.parent + + if c1.parent: + lst.extend(['..'] * up) + lst.reverse() + return os.sep.join(lst) or '.' + else: + return self.abspath() + + def abspath(self): + """ + Returns the absolute path.
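A short usage sketch (assuming a configured project):: + + def build(bld): + node = bld.path.find_node('wscript') + print(node.abspath()) # computed once, then memoized +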
A cache is kept on the node as ``cache_abspath`` + + :rtype: string + """ + try: + return self.cache_abspath + except AttributeError: + pass + # think twice before touching this (performance + complexity + correctness) + + if not self.parent: + val = os.sep + elif not self.parent.name: + val = os.sep + self.name + else: + val = self.parent.abspath() + os.sep + self.name + self.cache_abspath = val + return val + + if Utils.is_win32: + def abspath(self): + try: + return self.cache_abspath + except AttributeError: + pass + if not self.parent: + val = '' + elif not self.parent.name: + val = self.name + os.sep + else: + val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name + self.cache_abspath = val + return val + + def is_child_of(self, node): + """ + Returns whether the object belongs to a subtree of the input node:: + + def build(bld): + node = bld.path.find_node('wscript') + node.is_child_of(bld.path) # True + + :param node: path to use as a reference + :type node: :py:class:`waflib.Node.Node` + :rtype: bool + """ + p = self + diff = self.height() - node.height() + while diff > 0: + diff -= 1 + p = p.parent + return p is node + + def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False): + """ + Recursive method used by :py:meth:`waflib.Node.ant_glob`. + + :param accept: function used for accepting/rejecting a node, returns the patterns that can be still accepted in recursion + :type accept: function + :param maxdepth: maximum depth in the filesystem (25) + :type maxdepth: int + :param pats: list of patterns to accept and list of patterns to exclude + :type pats: tuple + :param dir: return folders too (False by default) + :type dir: bool + :param src: return files (True by default) + :type src: bool + :param remove: remove files/folders that do not exist (True by default) + :type remove: bool + :param quiet: disable build directory traversal warnings (verbose mode) + :type quiet: bool + :returns: A generator object to iterate from + :rtype: iterator + """ + dircont = self.listdir() + + try: + lst = set(self.children.keys()) + except AttributeError: + self.children = self.dict_class() + else: + if remove: + for x in lst - set(dircont): + self.children[x].evict() + + for name in dircont: + npats = accept(name, pats) + if npats and npats[0]: + accepted = [] in npats[0] + + node = self.make_node([name]) + + isdir = node.isdir() + if accepted: + if isdir: + if dir: + yield node + elif src: + yield node + + if isdir: + node.cache_isdir = True + if maxdepth: + for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats, dir=dir, src=src, remove=remove, quiet=quiet): + yield k + + def ant_glob(self, *k, **kw): + """ + Finds files across folders and returns Node objects: + + * ``**/*`` find all files recursively + * ``**/*.class`` find all files ending in .class + * ``..`` find files having two dot characters + + For example:: + + def configure(cfg): + # find all .cpp files + cfg.path.ant_glob('**/*.cpp') + # find particular files from the root filesystem (can be slow) + cfg.root.ant_glob('etc/*.txt') + # simple exclusion rule example + cfg.path.ant_glob('*.c*', excl=['*.c'], src=True, dir=False) + + For more information about the patterns, consult http://ant.apache.org/manual/dirtasks.html + Please remember that the '..'
sequence does not represent the parent directory:: + + def configure(cfg): + cfg.path.ant_glob('../*.h') # incorrect + cfg.path.parent.ant_glob('*.h') # correct + + The Node structure is itself a filesystem cache, so certain precautions must + be taken while matching files in the build or installation phases. + Node objects that do not have a corresponding file or folder are garbage-collected by default. + This garbage collection is usually required to prevent returning files that do not + exist anymore. Yet, this may also remove Node objects of files that are yet-to-be built. + + This typically happens when trying to match files in the build directory, + but there are also cases when files are created in the source directory. + Run ``waf -v`` to display any warnings, and consider passing ``remove=False`` + when matching files in the build directory. + + Since ant_glob can traverse both source and build folders, it is a best practice + to call this method only from the most specific build node:: + + def build(bld): + # traverses the build directory, may need ``remove=False``: + bld.path.ant_glob('project/dir/**/*.h') + # better, no accidental build directory traversal: + bld.path.find_node('project/dir').ant_glob('**/*.h') # best + + In addition, files and folders are listed immediately. When matching files in the + build folders, consider passing ``generator=True`` so that the generator object + returned can defer computation to a later stage. For example:: + + def build(bld): + bld(rule='tar xvf ${SRC}', source='arch.tar') + bld.add_group() + gen = bld.bldnode.ant_glob("*.h", generator=True, remove=True) + # files will be listed only after the arch.tar is unpacked + bld(rule='ls ${SRC}', source=gen, name='XYZ') + + + :param incl: ant patterns or list of patterns to include + :type incl: string or list of strings + :param excl: ant patterns or list of patterns to exclude + :type excl: string or list of strings + :param dir: return folders too (False by default) + :type dir: bool + :param src: return files (True by default) + :type src: bool + :param maxdepth: maximum depth of recursion + :type maxdepth: int + :param ignorecase: ignore case while matching (False by default) + :type ignorecase: bool + :param generator: Whether to evaluate the Nodes lazily + :type generator: bool + :param remove: remove files/folders that do not exist (True by default) + :type remove: bool + :param quiet: disable build directory traversal warnings (verbose mode) + :type quiet: bool + :returns: The corresponding Node objects as a list or as a generator object (generator=True) + :rtype: by default, list of :py:class:`waflib.Node.Node` instances + """ + src = kw.get('src', True) + dir = kw.get('dir') + excl = kw.get('excl', exclude_regs) + incl = k and k[0] or kw.get('incl', '**') + remove = kw.get('remove', True) + maxdepth = kw.get('maxdepth', 25) + ignorecase = kw.get('ignorecase', False) + quiet = kw.get('quiet', False) + pats = (ant_matcher(incl, ignorecase), ant_matcher(excl, ignorecase)) + + if kw.get('generator'): + return Utils.lazy_generator(self.ant_iter, (ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet)) + + it = self.ant_iter(ant_sub_matcher, maxdepth, pats, dir, src, remove, quiet) + if kw.get('flat'): + # returns relative paths as a space-delimited string + # prefer Node objects whenever possible + return ' '.join(x.path_from(self) for x in it) + return list(it) + + # ---------------------------------------------------------------------------- + # the methods below require the
source/build folders (bld.srcnode/bld.bldnode) + + def is_src(self): + """ + Returns True if the node is below the source directory. Note that ``!is_src() ≠ is_bld()`` + + :rtype: bool + """ + cur = self + x = self.ctx.srcnode + y = self.ctx.bldnode + while cur.parent: + if cur is y: + return False + if cur is x: + return True + cur = cur.parent + return False + + def is_bld(self): + """ + Returns True if the node is below the build directory. Note that ``!is_bld() ≠ is_src()`` + + :rtype: bool + """ + cur = self + y = self.ctx.bldnode + while cur.parent: + if cur is y: + return True + cur = cur.parent + return False + + def get_src(self): + """ + Returns the corresponding Node object in the source directory (or self if already + under the source directory). Use this method only if the purpose is to create + a Node object (this is common with folders but not with files, see ticket 1937) + + :rtype: :py:class:`waflib.Node.Node` + """ + cur = self + x = self.ctx.srcnode + y = self.ctx.bldnode + lst = [] + while cur.parent: + if cur is y: + lst.reverse() + return x.make_node(lst) + if cur is x: + return self + lst.append(cur.name) + cur = cur.parent + return self + + def get_bld(self): + """ + Returns the corresponding Node object in the build directory (or self if already + under the build directory). Use this method only if the purpose is to create + a Node object (this is common with folders but not with files, see ticket 1937) + + :rtype: :py:class:`waflib.Node.Node` + """ + cur = self + x = self.ctx.srcnode + y = self.ctx.bldnode + lst = [] + while cur.parent: + if cur is y: + return self + if cur is x: + lst.reverse() + return self.ctx.bldnode.make_node(lst) + lst.append(cur.name) + cur = cur.parent + # the file is external to the current project, make a fake root in the current build directory + lst.reverse() + if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'): + lst[0] = lst[0][0] + return self.ctx.bldnode.make_node(['__root__'] + lst) + + def find_resource(self, lst): + """ + Use this method in the build phase to find source files corresponding to the relative path given. + + First it looks up the Node data structure to find any declared Node object in the build directory. + If none is found, it then considers the filesystem in the source directory. + + :param lst: relative path + :type lst: string or list of string + :returns: the corresponding Node object or None + :rtype: :py:class:`waflib.Node.Node` + """ + if isinstance(lst, str): + lst = [x for x in Utils.split_path(lst) if x and x != '.'] + + node = self.get_bld().search_node(lst) + if not node: + node = self.get_src().find_node(lst) + if node and node.isdir(): + return None + return node + + def find_or_declare(self, lst): + """ + Use this method in the build phase to declare output files which + are meant to be written in the build directory. + + This method creates the Node object and its parent folder + as needed.
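+ + A hedged sketch of typical usage inside a build rule (the generator script + is hypothetical):: + + def build(bld): + tgt = bld.path.find_or_declare('gen/tables.c') # declared in the build dir + bld(rule='python ${SRC} > ${TGT}', source='make_tables.py', target=tgt)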
+ + :param lst: relative path + :type lst: string or list of string + """ + if isinstance(lst, str) and os.path.isabs(lst): + node = self.ctx.root.make_node(lst) + else: + node = self.get_bld().make_node(lst) + node.parent.mkdir() + return node + + def find_dir(self, lst): + """ + Searches for a folder on the filesystem (see :py:meth:`waflib.Node.Node.find_node`) + + :param lst: relative path + :type lst: string or list of string + :returns: The corresponding Node object or None if there is no such folder + :rtype: :py:class:`waflib.Node.Node` + """ + if isinstance(lst, str): + lst = [x for x in Utils.split_path(lst) if x and x != '.'] + + node = self.find_node(lst) + if node and not node.isdir(): + return None + return node + + # helpers for building things + def change_ext(self, ext, ext_in=None): + """ + Declares a build node with a distinct extension; this uses :py:meth:`waflib.Node.Node.find_or_declare` + + :return: A build node of the same path, but with a different extension + :rtype: :py:class:`waflib.Node.Node` + """ + name = self.name + if ext_in is None: + k = name.rfind('.') + if k >= 0: + name = name[:k] + ext + else: + name = name + ext + else: + name = name[:- len(ext_in)] + ext + + return self.parent.find_or_declare([name]) + + def bldpath(self): + """ + Returns the relative path seen from the build directory, e.g. ``src/foo.cpp`` + + :rtype: string + """ + return self.path_from(self.ctx.bldnode) + + def srcpath(self): + """ + Returns the relative path seen from the source directory, e.g. ``../src/foo.cpp`` + + :rtype: string + """ + return self.path_from(self.ctx.srcnode) + + def relpath(self): + """ + If the file is in the build directory, returns :py:meth:`waflib.Node.Node.bldpath`, + else returns :py:meth:`waflib.Node.Node.srcpath` + + :rtype: string + """ + cur = self + x = self.ctx.bldnode + while cur.parent: + if cur is x: + return self.bldpath() + cur = cur.parent + return self.srcpath() + + def bld_dir(self): + """ + Equivalent to self.parent.bldpath() + + :rtype: string + """ + return self.parent.bldpath() + + def h_file(self): + """ + See :py:func:`waflib.Utils.h_file` + + :return: a hash representing the file contents + :rtype: string or bytes + """ + return Utils.h_file(self.abspath()) + + def get_bld_sig(self): + """ + Returns a signature (see :py:meth:`waflib.Node.Node.h_file`) for the purpose + of build dependency calculation. This method uses a per-context cache.
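+ + For example, a header shared by many tasks is read and hashed only once per + build run; a rough sketch (hypothetical task code, not part of this class):: + + def collect_sigs(task): + return [node.get_bld_sig() for node in task.inputs] # later calls hit the cache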
+ + :return: a hash representing the object contents + :rtype: string or bytes + """ + # previous behaviour can be set by returning self.ctx.node_sigs[self] when a build node + try: + cache = self.ctx.cache_sig + except AttributeError: + cache = self.ctx.cache_sig = {} + try: + ret = cache[self] + except KeyError: + p = self.abspath() + try: + ret = cache[self] = self.h_file() + except EnvironmentError: + if self.isdir(): + # allow folders as build nodes, do not use the creation time + st = os.stat(p) + ret = cache[self] = Utils.h_list([p, st.st_ino, st.st_mode]) + return ret + raise + return ret + +pickle_lock = Utils.threading.Lock() +"""Lock mandatory for thread-safe node serialization""" + +class Nod3(Node): + """Mandatory subclass for thread-safe node serialization""" + pass # do not remove + + diff --git a/third_party/waf/waflib/Options.py b/third_party/waf/waflib/Options.py new file mode 100644 index 0000000..d410491 --- /dev/null +++ b/third_party/waf/waflib/Options.py @@ -0,0 +1,359 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Scott Newton, 2005 (scottn) +# Thomas Nagy, 2006-2018 (ita) + +""" +Support for waf command-line options + +Provides default and command-line options, as well as the command +that reads the ``options`` wscript function. +""" + +import os, tempfile, optparse, sys, re +from waflib import Logs, Utils, Context, Errors + +options = optparse.Values() +""" +A global dictionary representing user-provided command-line options:: + + $ waf --foo=bar +""" + +commands = [] +""" +List of commands to execute extracted from the command-line. This list +is consumed during the execution by :py:func:`waflib.Scripting.run_commands`. +""" + +envvars = [] +""" +List of environment variable declarations placed after the Waf executable name. +These are detected by searching for "=" in the remaining arguments. +You probably do not want to use this. +""" + +lockfile = os.environ.get('WAFLOCK', '.lock-waf_%s_build' % sys.platform) +""" +Name of the lock file that marks a project as configured +""" + +class opt_parser(optparse.OptionParser): + """ + Command-line options parser. + """ + def __init__(self, ctx, allow_unknown=False): + optparse.OptionParser.__init__(self, conflict_handler='resolve', add_help_option=False, + version='%s %s (%s)' % (Context.WAFNAME, Context.WAFVERSION, Context.WAFREVISION)) + self.formatter.width = Logs.get_term_cols() + self.ctx = ctx + self.allow_unknown = allow_unknown + + def _process_args(self, largs, rargs, values): + """ + Custom _process_args to allow unknown options according to the allow_unknown status + """ + while rargs: + try: + optparse.OptionParser._process_args(self,largs,rargs,values) + except (optparse.BadOptionError, optparse.AmbiguousOptionError) as e: + if self.allow_unknown: + largs.append(e.opt_str) + else: + self.error(str(e)) + + def _process_long_opt(self, rargs, values): + # --custom-option=-ftxyz is interpreted as -f -t...
see #2280 + if self.allow_unknown: + back = [] + rargs + try: + optparse.OptionParser._process_long_opt(self, rargs, values) + except optparse.BadOptionError: + while rargs: + rargs.pop() + rargs.extend(back) + rargs.pop(0) + raise + else: + optparse.OptionParser._process_long_opt(self, rargs, values) + + def print_usage(self, file=None): + return self.print_help(file) + + def get_usage(self): + """ + Builds the message to print on ``waf --help`` + + :rtype: string + """ + cmds_str = {} + for cls in Context.classes: + if not cls.cmd or cls.cmd == 'options' or cls.cmd.startswith( '_' ): + continue + + s = cls.__doc__ or '' + cmds_str[cls.cmd] = s + + if Context.g_module: + for (k, v) in Context.g_module.__dict__.items(): + if k in ('options', 'init', 'shutdown'): + continue + + if type(v) is type(Context.create_context): + if v.__doc__ and not k.startswith('_'): + cmds_str[k] = v.__doc__ + + just = 0 + for k in cmds_str: + just = max(just, len(k)) + + lst = [' %s: %s' % (k.ljust(just), v) for (k, v) in cmds_str.items()] + lst.sort() + ret = '\n'.join(lst) + + return '''%s [commands] [options] + +Main commands (example: ./%s build -j4) +%s +''' % (Context.WAFNAME, Context.WAFNAME, ret) + + +class OptionsContext(Context.Context): + """ + Collects custom options from wscript files and parses the command line. + Sets the global :py:const:`waflib.Options.commands` and :py:const:`waflib.Options.options` values. + """ + cmd = 'options' + fun = 'options' + + def __init__(self, **kw): + super(OptionsContext, self).__init__(**kw) + + self.parser = opt_parser(self) + """Instance of :py:class:`waflib.Options.opt_parser`""" + + self.option_groups = {} + + jobs = self.jobs() + p = self.add_option + color = os.environ.get('NOCOLOR', '') and 'no' or 'auto' + if os.environ.get('CLICOLOR', '') == '0': + color = 'no' + elif os.environ.get('CLICOLOR_FORCE', '') == '1': + color = 'yes' + p('-c', '--color', dest='colors', default=color, action='store', help='whether to use colors (yes/no/auto) [default: auto]', choices=('yes', 'no', 'auto')) + p('-j', '--jobs', dest='jobs', default=jobs, type='int', help='amount of parallel jobs (%r)' % jobs) + p('-k', '--keep', dest='keep', default=0, action='count', help='continue despite errors (-kk to try harder)') + p('-v', '--verbose', dest='verbose', default=0, action='count', help='verbosity level -v -vv or -vvv [default: 0]') + p('--zones', dest='zones', default='', action='store', help='debugging zones (task_gen, deps, tasks, etc)') + p('--profile', dest='profile', default=0, action='store_true', help=optparse.SUPPRESS_HELP) + p('--pdb', dest='pdb', default=0, action='store_true', help=optparse.SUPPRESS_HELP) + p('-h', '--help', dest='whelp', default=0, action='store_true', help="show this help message and exit") + + gr = self.add_option_group('Configuration options') + self.option_groups['configure options'] = gr + + gr.add_option('-o', '--out', action='store', default='', help='build dir for the project', dest='out') + gr.add_option('-t', '--top', action='store', default='', help='src dir for the project', dest='top') + + gr.add_option('--no-lock-in-run', action='store_true', default=os.environ.get('NO_LOCK_IN_RUN', ''), help=optparse.SUPPRESS_HELP, dest='no_lock_in_run') + gr.add_option('--no-lock-in-out', action='store_true', default=os.environ.get('NO_LOCK_IN_OUT', ''), help=optparse.SUPPRESS_HELP, dest='no_lock_in_out') + gr.add_option('--no-lock-in-top', action='store_true', default=os.environ.get('NO_LOCK_IN_TOP', ''), help=optparse.SUPPRESS_HELP, 
dest='no_lock_in_top') + + default_prefix = getattr(Context.g_module, 'default_prefix', os.environ.get('PREFIX')) + if not default_prefix: + if Utils.unversioned_sys_platform() == 'win32': + d = tempfile.gettempdir() + default_prefix = d[0].upper() + d[1:] + # win32 preserves the case, but gettempdir does not + else: + default_prefix = '/usr/local/' + gr.add_option('--prefix', dest='prefix', default=default_prefix, help='installation prefix [default: %r]' % default_prefix) + gr.add_option('--bindir', dest='bindir', help='bindir') + gr.add_option('--libdir', dest='libdir', help='libdir') + + gr = self.add_option_group('Build and installation options') + self.option_groups['build and install options'] = gr + gr.add_option('-p', '--progress', dest='progress_bar', default=0, action='count', help= '-p: progress bar; -pp: ide output') + gr.add_option('--targets', dest='targets', default='', action='store', help='task generators, e.g. "target1,target2"') + + gr = self.add_option_group('Step options') + self.option_groups['step options'] = gr + gr.add_option('--files', dest='files', default='', action='store', help='files to process, by regexp, e.g. "*/main.c,*/test/main.o"') + + default_destdir = os.environ.get('DESTDIR', '') + + gr = self.add_option_group('Installation and uninstallation options') + self.option_groups['install/uninstall options'] = gr + gr.add_option('--destdir', help='installation root [default: %r]' % default_destdir, default=default_destdir, dest='destdir') + gr.add_option('-f', '--force', dest='force', default=False, action='store_true', help='force file installation') + gr.add_option('--distcheck-args', metavar='ARGS', help='arguments to pass to distcheck', default=None, action='store') + + def jobs(self): + """ + Finds the optimal amount of cpu cores to use for parallel jobs. 
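+ + A condensed sketch of the detection order implemented below (standalone + approximation, not the method itself):: + + import os + count = int(os.environ.get('JOBS', 0)) # explicit override wins + if count < 1 and hasattr(os, 'sysconf_names'): + if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: + count = int(os.sysconf('SC_NPROCESSORS_ONLN')) + count = min(max(count, 1), 1024) # clamp to the same bounds as below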
+ At runtime the options can be obtained from :py:const:`waflib.Options.options` :: + + from waflib.Options import options + njobs = options.jobs + + :return: the amount of cpu cores + :rtype: int + """ + count = int(os.environ.get('JOBS', 0)) + if count < 1: + if 'NUMBER_OF_PROCESSORS' in os.environ: + # on Windows, use the NUMBER_OF_PROCESSORS environment variable + count = int(os.environ.get('NUMBER_OF_PROCESSORS', 1)) + else: + # on everything else, first try the POSIX sysconf values + if hasattr(os, 'sysconf_names'): + if 'SC_NPROCESSORS_ONLN' in os.sysconf_names: + count = int(os.sysconf('SC_NPROCESSORS_ONLN')) + elif 'SC_NPROCESSORS_CONF' in os.sysconf_names: + count = int(os.sysconf('SC_NPROCESSORS_CONF')) + if not count and os.name not in ('nt', 'java'): + try: + tmp = self.cmd_and_log(['sysctl', '-n', 'hw.ncpu'], quiet=0) + except Errors.WafError: + pass + else: + if re.match('^[0-9]+$', tmp): + count = int(tmp) + if count < 1: + count = 1 + elif count > 1024: + count = 1024 + return count + + def add_option(self, *k, **kw): + """ + Wraps ``optparse.add_option``:: + + def options(ctx): + ctx.add_option('-u', '--use', dest='use', default=False, + action='store_true', help='a boolean option') + + :rtype: optparse option object + """ + return self.parser.add_option(*k, **kw) + + def add_option_group(self, *k, **kw): + """ + Wraps ``optparse.add_option_group``:: + + def options(ctx): + gr = ctx.add_option_group('some options') + gr.add_option('-u', '--use', dest='use', default=False, action='store_true') + + :rtype: optparse option group object + """ + try: + gr = self.option_groups[k[0]] + except KeyError: + gr = self.parser.add_option_group(*k, **kw) + self.option_groups[k[0]] = gr + return gr + + def get_option_group(self, opt_str): + """ + Wraps ``optparse.get_option_group``:: + + def options(ctx): + gr = ctx.get_option_group('configure options') + gr.add_option('-o', '--out', action='store', default='', + help='build dir for the project', dest='out') + + :rtype: optparse option group object + """ + try: + return self.option_groups[opt_str] + except KeyError: + for group in self.parser.option_groups: + if group.title == opt_str: + return group + return None + + def sanitize_path(self, path, cwd=None): + if not cwd: + cwd = Context.launch_dir + p = os.path.expanduser(path) + p = os.path.join(cwd, p) + p = os.path.normpath(p) + p = os.path.abspath(p) + return p + + def parse_cmd_args(self, _args=None, cwd=None, allow_unknown=False): + """ + Just parse the arguments + """ + self.parser.allow_unknown = allow_unknown + (options, leftover_args) = self.parser.parse_args(args=_args) + envvars = [] + commands = [] + for arg in leftover_args: + if '=' in arg: + envvars.append(arg) + elif arg != 'options': + commands.append(arg) + + if options.jobs < 1: + options.jobs = 1 + for name in 'top out destdir prefix bindir libdir'.split(): + # those paths are usually expanded from Context.launch_dir + if getattr(options, name, None): + path = self.sanitize_path(getattr(options, name), cwd) + setattr(options, name, path) + return options, commands, envvars + + def init_module_vars(self, arg_options, arg_commands, arg_envvars): + options.__dict__.clear() + del commands[:] + del envvars[:] + + options.__dict__.update(arg_options.__dict__) + commands.extend(arg_commands) + envvars.extend(arg_envvars) + + for var in envvars: + (name, value) = var.split('=', 1) + os.environ[name.strip()] = value + + def init_logs(self, options, commands, envvars): + Logs.verbose = options.verbose + if options.verbose >= 
1: + self.load('errcheck') + + colors = {'yes' : 2, 'auto' : 1, 'no' : 0}[options.colors] + Logs.enable_colors(colors) + + if options.zones: + Logs.zones = options.zones.split(',') + if not Logs.verbose: + Logs.verbose = 1 + elif Logs.verbose > 0: + Logs.zones = ['runner'] + if Logs.verbose > 2: + Logs.zones = ['*'] + + def parse_args(self, _args=None): + """ + Parses arguments from a list which is not necessarily the command-line. + Initializes the module variables options, commands and envvars. + If help is requested, prints it and exits the application + + :param _args: arguments + :type _args: list of strings + """ + options, commands, envvars = self.parse_cmd_args() + self.init_logs(options, commands, envvars) + self.init_module_vars(options, commands, envvars) + + def execute(self): + """ + See :py:func:`waflib.Context.Context.execute` + """ + super(OptionsContext, self).execute() + self.parse_args() + Utils.alloc_process_pool(options.jobs) + diff --git a/third_party/waf/waflib/Runner.py b/third_party/waf/waflib/Runner.py new file mode 100644 index 0000000..350c86a --- /dev/null +++ b/third_party/waf/waflib/Runner.py @@ -0,0 +1,622 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Runner.py: Task scheduling and execution +""" + +import heapq, traceback +try: + from queue import Queue, PriorityQueue +except ImportError: + from Queue import Queue + try: + from Queue import PriorityQueue + except ImportError: + class PriorityQueue(Queue): + def _init(self, maxsize): + self.maxsize = maxsize + self.queue = [] + def _put(self, item): + heapq.heappush(self.queue, item) + def _get(self): + return heapq.heappop(self.queue) + +from waflib import Utils, Task, Errors, Logs + +GAP = 5 +""" +Wait for at least ``GAP * njobs`` before trying to enqueue more tasks to run +""" + +class PriorityTasks(object): + def __init__(self): + self.lst = [] + def __len__(self): + return len(self.lst) + def __iter__(self): + return iter(self.lst) + def __str__(self): + return 'PriorityTasks: [%s]' % '\n '.join(str(x) for x in self.lst) + def clear(self): + self.lst = [] + def append(self, task): + heapq.heappush(self.lst, task) + def appendleft(self, task): + "Deprecated, do not use" + heapq.heappush(self.lst, task) + def pop(self): + return heapq.heappop(self.lst) + def extend(self, lst): + if self.lst: + for x in lst: + self.append(x) + else: + if isinstance(lst, list): + self.lst = lst + heapq.heapify(lst) + else: + self.lst = lst.lst + +class Consumer(Utils.threading.Thread): + """ + Daemon thread object that executes a task. It shares a semaphore with + the coordinator :py:class:`waflib.Runner.Spawner`. There is one + instance per task to consume. + """ + def __init__(self, spawner, task): + Utils.threading.Thread.__init__(self) + self.task = task + """Task to execute""" + self.spawner = spawner + """Coordinator object""" + self.daemon = True + self.start() + def run(self): + """ + Processes a single task + """ + try: + if not self.spawner.master.stop: + self.spawner.master.process_task(self.task) + finally: + self.spawner.sem.release() + self.spawner.master.out.put(self.task) + self.task = None + self.spawner = None + +class Spawner(Utils.threading.Thread): + """ + Daemon thread that consumes tasks from the :py:class:`waflib.Runner.Parallel` producer and + spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each + :py:class:`waflib.Task.Task` instance.
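+ + The coordination is a classic bounded producer/consumer scheme; the essence + of the ``loop`` method below (illustrative only):: + + while True: + task = master.ready.get() # block until the producer supplies work + sem.acquire() # at most ``numjobs`` consumers at a time + Consumer(self, task) # the consumer releases ``sem`` when done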
+ """ + def __init__(self, master): + Utils.threading.Thread.__init__(self) + self.master = master + """:py:class:`waflib.Runner.Parallel` producer instance""" + self.sem = Utils.threading.Semaphore(master.numjobs) + """Bounded semaphore that prevents spawning more than *n* concurrent consumers""" + self.daemon = True + self.start() + def run(self): + """ + Spawns new consumers to execute tasks by delegating to :py:meth:`waflib.Runner.Spawner.loop` + """ + try: + self.loop() + except Exception: + # Python 2 prints unnecessary messages when shutting down + # we also want to stop the thread properly + pass + def loop(self): + """ + Consumes task objects from the producer; ends when the producer has no more + task to provide. + """ + master = self.master + while 1: + task = master.ready.get() + self.sem.acquire() + if not master.stop: + task.log_display(task.generator.bld) + Consumer(self, task) + +class Parallel(object): + """ + Schedule the tasks obtained from the build context for execution. + """ + def __init__(self, bld, j=2): + """ + The initialization requires a build context reference + for computing the total number of jobs. + """ + + self.numjobs = j + """ + Amount of parallel consumers to use + """ + + self.bld = bld + """ + Instance of :py:class:`waflib.Build.BuildContext` + """ + + self.outstanding = PriorityTasks() + """Heap of :py:class:`waflib.Task.Task` that may be ready to be executed""" + + self.postponed = PriorityTasks() + """Heap of :py:class:`waflib.Task.Task` which are not ready to run for non-DAG reasons""" + + self.incomplete = set() + """List of :py:class:`waflib.Task.Task` waiting for dependent tasks to complete (DAG)""" + + self.ready = PriorityQueue(0) + """List of :py:class:`waflib.Task.Task` ready to be executed by consumers""" + + self.out = Queue(0) + """List of :py:class:`waflib.Task.Task` returned by the task consumers""" + + self.count = 0 + """Amount of tasks that may be processed by :py:class:`waflib.Runner.TaskConsumer`""" + + self.processed = 0 + """Amount of tasks processed""" + + self.stop = False + """Error flag to stop the build""" + + self.error = [] + """Tasks that could not be executed""" + + self.biter = None + """Task iterator which must give groups of parallelizable tasks when calling ``next()``""" + + self.dirty = False + """ + Flag that indicates that the build cache must be saved when a task was executed + (calls :py:meth:`waflib.Build.BuildContext.store`)""" + + self.revdeps = Utils.defaultdict(set) + """ + The reverse dependency graph of dependencies obtained from Task.run_after + """ + + self.spawner = None + """ + Coordinating daemon thread that spawns thread consumers + """ + if self.numjobs > 1: + self.spawner = Spawner(self) + + def get_next_task(self): + """ + Obtains the next Task instance to run + + :rtype: :py:class:`waflib.Task.Task` + """ + if not self.outstanding: + return None + return self.outstanding.pop() + + def postpone(self, tsk): + """ + Adds the task to the list :py:attr:`waflib.Runner.Parallel.postponed`. + The order is scrambled so as to consume as many tasks in parallel as possible. + + :param tsk: task instance + :type tsk: :py:class:`waflib.Task.Task` + """ + self.postponed.append(tsk) + + def refill_task_list(self): + """ + Pulls a next group of tasks to execute in :py:attr:`waflib.Runner.Parallel.outstanding`. + Ensures that all tasks in the current build group are complete before processing the next one. 
+ """ + while self.count > self.numjobs * GAP: + self.get_out() + + while not self.outstanding: + if self.count: + self.get_out() + if self.outstanding: + break + elif self.postponed: + try: + cond = self.deadlock == self.processed + except AttributeError: + pass + else: + if cond: + # The most common reason is conflicting build order declaration + # for example: "X run_after Y" and "Y run_after X" + # Another can be changing "run_after" dependencies while the build is running + # for example: updating "tsk.run_after" in the "runnable_status" method + lst = [] + for tsk in self.postponed: + deps = [id(x) for x in tsk.run_after if not x.hasrun] + lst.append('%s\t-> %r' % (repr(tsk), deps)) + if not deps: + lst.append('\n task %r dependencies are done, check its *runnable_status*?' % id(tsk)) + raise Errors.WafError('Deadlock detected: check the task build order%s' % ''.join(lst)) + self.deadlock = self.processed + + if self.postponed: + self.outstanding.extend(self.postponed) + self.postponed.clear() + elif not self.count: + if self.incomplete: + for x in self.incomplete: + for k in x.run_after: + if not k.hasrun: + break + else: + # dependency added after the build started without updating revdeps + self.incomplete.remove(x) + self.outstanding.append(x) + break + else: + if self.stop or self.error: + break + raise Errors.WafError('Broken revdeps detected on %r' % self.incomplete) + else: + tasks = next(self.biter) + ready, waiting = self.prio_and_split(tasks) + self.outstanding.extend(ready) + self.incomplete.update(waiting) + self.total = self.bld.total() + break + + def add_more_tasks(self, tsk): + """ + If a task provides :py:attr:`waflib.Task.Task.more_tasks`, then the tasks contained + in that list are added to the current build and will be processed before the next build group. + + The priorities for dependent tasks are not re-calculated globally + + :param tsk: task instance + :type tsk: :py:attr:`waflib.Task.Task` + """ + if getattr(tsk, 'more_tasks', None): + more = set(tsk.more_tasks) + groups_done = set() + def iteri(a, b): + for x in a: + yield x + for x in b: + yield x + + # Update the dependency tree + # this assumes that task.run_after values were updated + for x in iteri(self.outstanding, self.incomplete): + for k in x.run_after: + if isinstance(k, Task.TaskGroup): + if k not in groups_done: + groups_done.add(k) + for j in k.prev & more: + self.revdeps[j].add(k) + elif k in more: + self.revdeps[k].add(x) + + ready, waiting = self.prio_and_split(tsk.more_tasks) + self.outstanding.extend(ready) + self.incomplete.update(waiting) + self.total += len(tsk.more_tasks) + + def mark_finished(self, tsk): + def try_unfreeze(x): + # DAG ancestors are likely to be in the incomplete set + # This assumes that the run_after contents have not changed + # after the build starts, else a deadlock may occur + if x in self.incomplete: + # TODO remove dependencies to free some memory? + # x.run_after.remove(tsk) + for k in x.run_after: + if not k.hasrun: + break + else: + self.incomplete.remove(x) + self.outstanding.append(x) + + if tsk in self.revdeps: + for x in self.revdeps[tsk]: + if isinstance(x, Task.TaskGroup): + x.prev.remove(tsk) + if not x.prev: + for k in x.next: + # TODO necessary optimization? + k.run_after.remove(x) + try_unfreeze(k) + # TODO necessary optimization? 
+ x.next = [] + else: + try_unfreeze(x) + del self.revdeps[tsk] + + if hasattr(tsk, 'semaphore'): + sem = tsk.semaphore + try: + sem.release(tsk) + except KeyError: + # TODO + pass + else: + while sem.waiting and not sem.is_locked(): + # take a frozen task, make it ready to run + x = sem.waiting.pop() + self._add_task(x) + + def get_out(self): + """ + Waits for a Task that task consumers add to :py:attr:`waflib.Runner.Parallel.out` after execution. + Adds more Tasks if necessary through :py:attr:`waflib.Runner.Parallel.add_more_tasks`. + + :rtype: :py:attr:`waflib.Task.Task` + """ + tsk = self.out.get() + if not self.stop: + self.add_more_tasks(tsk) + self.mark_finished(tsk) + + self.count -= 1 + self.dirty = True + return tsk + + def add_task(self, tsk): + """ + Enqueue a Task to :py:attr:`waflib.Runner.Parallel.ready` so that consumers can run them. + + :param tsk: task instance + :type tsk: :py:attr:`waflib.Task.Task` + """ + # TODO change in waf 2.1 + self.ready.put(tsk) + + def _add_task(self, tsk): + if hasattr(tsk, 'semaphore'): + sem = tsk.semaphore + try: + sem.acquire(tsk) + except IndexError: + sem.waiting.add(tsk) + return + + self.count += 1 + self.processed += 1 + if self.numjobs == 1: + tsk.log_display(tsk.generator.bld) + try: + self.process_task(tsk) + finally: + self.out.put(tsk) + else: + self.add_task(tsk) + + def process_task(self, tsk): + """ + Processes a task and attempts to stop the build in case of errors + """ + tsk.process() + if tsk.hasrun != Task.SUCCESS: + self.error_handler(tsk) + + def skip(self, tsk): + """ + Mark a task as skipped/up-to-date + """ + tsk.hasrun = Task.SKIPPED + self.mark_finished(tsk) + + def cancel(self, tsk): + """ + Mark a task as failed because of unsatisfiable dependencies + """ + tsk.hasrun = Task.CANCELED + self.mark_finished(tsk) + + def error_handler(self, tsk): + """ + Called when a task cannot be executed. The flag :py:attr:`waflib.Runner.Parallel.stop` is set, + unless the build is executed with:: + + $ waf build -k + + :param tsk: task instance + :type tsk: :py:attr:`waflib.Task.Task` + """ + if not self.bld.keep: + self.stop = True + self.error.append(tsk) + + def task_status(self, tsk): + """ + Obtains the task status to decide whether to run it immediately or not. + + :return: the exit status, for example :py:attr:`waflib.Task.ASK_LATER` + :rtype: integer + """ + try: + return tsk.runnable_status() + except Exception: + self.processed += 1 + tsk.err_msg = traceback.format_exc() + if not self.stop and self.bld.keep: + self.skip(tsk) + if self.bld.keep == 1: + # if -k stop on the first exception, if -kk try to go as far as possible + if Logs.verbose > 1 or not self.error: + self.error.append(tsk) + self.stop = True + else: + if Logs.verbose > 1: + self.error.append(tsk) + return Task.EXCEPTION + + tsk.hasrun = Task.EXCEPTION + self.error_handler(tsk) + + return Task.EXCEPTION + + def start(self): + """ + Obtains Task instances from the BuildContext instance and adds the ones that need to be executed to + :py:class:`waflib.Runner.Parallel.ready` so that the :py:class:`waflib.Runner.Spawner` consumer thread + has them executed. Obtains the executed Tasks back from :py:class:`waflib.Runner.Parallel.out` + and marks the build as failed by setting the ``stop`` flag. + If only one job is used, then executes the tasks one by one, without consumers. 
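+ + The per-task dispatch in the main loop boils down to four outcomes + (schematic excerpt of the code below):: + + st = self.task_status(tsk) + if st == Task.RUN_ME: + self._add_task(tsk) # run now, possibly on a consumer thread + elif st == Task.ASK_LATER: + self.postpone(tsk) # dependencies not ready yet + elif st == Task.SKIP_ME: + self.skip(tsk) # up to date + elif st == Task.CANCEL_ME: + self.cancel(tsk) # unsatisfiable dependencies (waf -k)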
+ """ + self.total = self.bld.total() + + while not self.stop: + + self.refill_task_list() + + # consider the next task + tsk = self.get_next_task() + if not tsk: + if self.count: + # tasks may add new ones after they are run + continue + else: + # no tasks to run, no tasks running, time to exit + break + + if tsk.hasrun: + # if the task is marked as "run", just skip it + self.processed += 1 + continue + + if self.stop: # stop immediately after a failure is detected + break + + st = self.task_status(tsk) + if st == Task.RUN_ME: + self._add_task(tsk) + elif st == Task.ASK_LATER: + self.postpone(tsk) + elif st == Task.SKIP_ME: + self.processed += 1 + self.skip(tsk) + self.add_more_tasks(tsk) + elif st == Task.CANCEL_ME: + # A dependency problem has occurred, and the + # build is most likely run with `waf -k` + if Logs.verbose > 1: + self.error.append(tsk) + self.processed += 1 + self.cancel(tsk) + + # self.count represents the tasks that have been made available to the consumer threads + # collect all the tasks after an error else the message may be incomplete + while self.error and self.count: + self.get_out() + + self.ready.put(None) + if not self.stop: + assert not self.count + assert not self.postponed + assert not self.incomplete + + def prio_and_split(self, tasks): + """ + Label input tasks with priority values, and return a pair containing + the tasks that are ready to run and the tasks that are necessarily + waiting for other tasks to complete. + + The priority system is really meant as an optional layer for optimization: + dependency cycles are found quickly, and builds should be more efficient. + A high priority number means that a task is processed first. + + This method can be overridden to disable the priority system:: + + def prio_and_split(self, tasks): + return tasks, [] + + :return: A pair of task lists + :rtype: tuple + """ + # to disable: + #return tasks, [] + for x in tasks: + x.visited = 0 + + reverse = self.revdeps + + groups_done = set() + for x in tasks: + for k in x.run_after: + if isinstance(k, Task.TaskGroup): + if k not in groups_done: + groups_done.add(k) + for j in k.prev: + reverse[j].add(k) + else: + reverse[k].add(x) + + # the priority number is not the tree depth + def visit(n): + if isinstance(n, Task.TaskGroup): + return sum(visit(k) for k in n.next) + + if n.visited == 0: + n.visited = 1 + + if n in reverse: + rev = reverse[n] + n.prio_order = n.tree_weight + len(rev) + sum(visit(k) for k in rev) + else: + n.prio_order = n.tree_weight + + n.visited = 2 + elif n.visited == 1: + raise Errors.WafError('Dependency cycle found!') + return n.prio_order + + for x in tasks: + if x.visited != 0: + # must visit all to detect cycles + continue + try: + visit(x) + except Errors.WafError: + self.debug_cycles(tasks, reverse) + + ready = [] + waiting = [] + for x in tasks: + for k in x.run_after: + if not k.hasrun: + waiting.append(x) + break + else: + ready.append(x) + return (ready, waiting) + + def debug_cycles(self, tasks, reverse): + tmp = {} + for x in tasks: + tmp[x] = 0 + + def visit(n, acc): + if isinstance(n, Task.TaskGroup): + for k in n.next: + visit(k, acc) + return + if tmp[n] == 0: + tmp[n] = 1 + for k in reverse.get(n, []): + visit(k, [n] + acc) + tmp[n] = 2 + elif tmp[n] == 1: + lst = [] + for tsk in acc: + lst.append(repr(tsk)) + if tsk is n: + # exclude prior nodes, we want the minimum cycle + break + raise Errors.WafError('Task dependency cycle in "run_after" constraints: %s' % ''.join(lst)) + for x in tasks: + visit(x, []) + diff --git 
a/third_party/waf/waflib/Scripting.py b/third_party/waf/waflib/Scripting.py new file mode 100644 index 0000000..a80cb36 --- /dev/null +++ b/third_party/waf/waflib/Scripting.py @@ -0,0 +1,631 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +"Module called for configuring, compiling and installing targets" + +from __future__ import with_statement + +import os, shlex, shutil, traceback, errno, sys, stat +from waflib import Utils, Configure, Logs, Options, ConfigSet, Context, Errors, Build, Node + +build_dir_override = None + +no_climb_commands = ['configure'] + +default_cmd = "build" + +def waf_entry_point(current_directory, version, wafdir): + """ + This is the main entry point, all Waf execution starts here. + + :param current_directory: absolute path representing the current directory + :type current_directory: string + :param version: version number + :type version: string + :param wafdir: absolute path representing the directory of the waf library + :type wafdir: string + """ + Logs.init_log() + + if Context.WAFVERSION != version: + Logs.error('Waf script %r and library %r do not match (directory %r)', version, Context.WAFVERSION, wafdir) + sys.exit(1) + + # Store current directory before any chdir + Context.waf_dir = wafdir + Context.run_dir = Context.launch_dir = current_directory + start_dir = current_directory + no_climb = os.environ.get('NOCLIMB') + + if len(sys.argv) > 1: + # os.path.join handles absolute paths + # if sys.argv[1] is not an absolute path, then it is relative to the current working directory + potential_wscript = os.path.join(current_directory, sys.argv[1]) + if os.path.basename(potential_wscript) == Context.WSCRIPT_FILE and os.path.isfile(potential_wscript): + # need to explicitly normalize the path, as it may contain extra '/.' 
+ path = os.path.normpath(os.path.dirname(potential_wscript)) + start_dir = os.path.abspath(path) + no_climb = True + sys.argv.pop(1) + + ctx = Context.create_context('options') + (options, commands, env) = ctx.parse_cmd_args(allow_unknown=True) + if options.top: + start_dir = Context.run_dir = Context.top_dir = options.top + no_climb = True + if options.out: + Context.out_dir = options.out + + # if 'configure' is in the commands, do not search any further + if not no_climb: + for k in no_climb_commands: + for y in commands: + if y.startswith(k): + no_climb = True + break + + # try to find a lock file (if the project was configured) + # at the same time, store the first wscript file seen + cur = start_dir + while cur: + try: + lst = os.listdir(cur) + except OSError: + lst = [] + Logs.error('Directory %r is unreadable!', cur) + if Options.lockfile in lst: + env = ConfigSet.ConfigSet() + try: + env.load(os.path.join(cur, Options.lockfile)) + ino = os.stat(cur)[stat.ST_INO] + except EnvironmentError: + pass + else: + # check if the folder was not moved + for x in (env.run_dir, env.top_dir, env.out_dir): + if not x: + continue + if Utils.is_win32: + if cur == x: + load = True + break + else: + # if the filesystem features symlinks, compare the inode numbers + try: + ino2 = os.stat(x)[stat.ST_INO] + except OSError: + pass + else: + if ino == ino2: + load = True + break + else: + Logs.warn('invalid lock file in %s', cur) + load = False + + if load: + Context.run_dir = env.run_dir + Context.top_dir = env.top_dir + Context.out_dir = env.out_dir + break + + if not Context.run_dir: + if Context.WSCRIPT_FILE in lst: + Context.run_dir = cur + + next = os.path.dirname(cur) + if next == cur: + break + cur = next + + if no_climb: + break + + wscript = os.path.normpath(os.path.join(Context.run_dir, Context.WSCRIPT_FILE)) + if not os.path.exists(wscript): + if options.whelp: + Logs.warn('These are the generic options (no wscript/project found)') + ctx.parser.print_help() + sys.exit(0) + Logs.error('Waf: Run from a folder containing a %r file (or try -h for the generic options)', Context.WSCRIPT_FILE) + sys.exit(1) + + try: + os.chdir(Context.run_dir) + except OSError: + Logs.error('Waf: The folder %r is unreadable', Context.run_dir) + sys.exit(1) + + try: + set_main_module(wscript) + except Errors.WafError as e: + Logs.pprint('RED', e.verbose_msg) + Logs.error(str(e)) + sys.exit(1) + except Exception as e: + Logs.error('Waf: The wscript in %r is unreadable', Context.run_dir) + traceback.print_exc(file=sys.stdout) + sys.exit(2) + + if options.profile: + import cProfile, pstats + cProfile.runctx('from waflib import Scripting; Scripting.run_commands()', {}, {}, 'profi.txt') + p = pstats.Stats('profi.txt') + p.sort_stats('time').print_stats(75) # or 'cumulative' + else: + try: + try: + run_commands() + except: + if options.pdb: + import pdb + type, value, tb = sys.exc_info() + traceback.print_exc() + pdb.post_mortem(tb) + else: + raise + except Errors.WafError as e: + if Logs.verbose > 1: + Logs.pprint('RED', e.verbose_msg) + Logs.error(e.msg) + sys.exit(1) + except SystemExit: + raise + except Exception as e: + traceback.print_exc(file=sys.stdout) + sys.exit(2) + except KeyboardInterrupt: + Logs.pprint('RED', 'Interrupted') + sys.exit(68) + +def set_main_module(file_path): + """ + Read the main wscript file into :py:const:`waflib.Context.Context.g_module` and + bind default functions such as ``init``, ``dist``, ``distclean`` if not defined. 
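As a consequence, a minimal top-level wscript only defines what it customizes; + a hypothetical example:: + + # wscript + def configure(conf): + pass + + def build(bld): + pass + + # init, shutdown, options, dist, distclean and distcheck are supplied + # automatically by set_main_module +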
+ Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization. + + :param file_path: absolute path representing the top-level wscript file + :type file_path: string + """ + Context.g_module = Context.load_module(file_path) + Context.g_module.root_path = file_path + + # note: to register the module globally, use the following: + # sys.modules['wscript_main'] = g_module + + def set_def(obj): + name = obj.__name__ + if not name in Context.g_module.__dict__: + setattr(Context.g_module, name, obj) + for k in (dist, distclean, distcheck): + set_def(k) + # add dummy init and shutdown functions if they're not defined + if not 'init' in Context.g_module.__dict__: + Context.g_module.init = Utils.nada + if not 'shutdown' in Context.g_module.__dict__: + Context.g_module.shutdown = Utils.nada + if not 'options' in Context.g_module.__dict__: + Context.g_module.options = Utils.nada + +def parse_options(): + """ + Parses the command-line options and initializes the logging system. + Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization. + """ + ctx = Context.create_context('options') + ctx.execute() + if not Options.commands: + if isinstance(default_cmd, list): + Options.commands.extend(default_cmd) + else: + Options.commands.append(default_cmd) + if Options.options.whelp: + ctx.parser.print_help() + sys.exit(0) + +def run_command(cmd_name): + """ + Executes a single Waf command. Called by :py:func:`waflib.Scripting.run_commands`. + + :param cmd_name: command to execute, like ``build`` + :type cmd_name: string + """ + ctx = Context.create_context(cmd_name) + ctx.log_timer = Utils.Timer() + ctx.options = Options.options # provided for convenience + ctx.cmd = cmd_name + try: + ctx.execute() + finally: + # Issue 1374 + ctx.finalize() + return ctx + +def run_commands(): + """ + Executes the Waf commands that were given on the command-line, and the other options. + Called by :py:func:`waflib.Scripting.waf_entry_point` during the initialization, and executed + after :py:func:`waflib.Scripting.parse_options`. + """ + parse_options() + run_command('init') + while Options.commands: + cmd_name = Options.commands.pop(0) + ctx = run_command(cmd_name) + Logs.info('%r finished successfully (%s)', cmd_name, ctx.log_timer) + run_command('shutdown') + +########################################################################################### + +def distclean_dir(dirname): + """ + Distclean function called in the particular case when:: + + top == out + + :param dirname: absolute path of the folder to clean + :type dirname: string + """ + for (root, dirs, files) in os.walk(dirname): + for f in files: + if f.endswith(('.o', '.moc', '.exe')): + fname = os.path.join(root, f) + try: + os.remove(fname) + except OSError: + Logs.warn('Could not remove %r', fname) + + for x in (Context.DBFILE, 'config.log'): + try: + os.remove(x) + except OSError: + pass + + try: + shutil.rmtree(Build.CACHE_DIR) + except OSError: + pass + +def distclean(ctx): + '''removes build folders and data''' + + def remove_and_log(k, fun): + try: + fun(k) + except EnvironmentError as e: + if e.errno != errno.ENOENT: + Logs.warn('Could not remove %r', k) + + # remove waf cache folders on the top-level + if not Options.commands: + for k in os.listdir('.'): + for x in '.waf-2 waf-2 .waf3-2 waf3-2'.split(): + if k.startswith(x): + remove_and_log(k, shutil.rmtree) + + # remove a build folder, if any + cur = '.'
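# Illustrative sketch, not part of the patch: the command flow implemented by
# run_commands() above. Running `./waf` with no arguments boils down to:
#
#	parse_options()          # fills Options.commands, falling back to default_cmd ('build')
#	run_command('init')      # the init function bound on the wscript module
#	run_command('build')     # one fresh context per command, finalized in all cases
#	run_command('shutdown')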
+ if os.environ.get('NO_LOCK_IN_TOP') or ctx.options.no_lock_in_top: + cur = ctx.options.out + + try: + lst = os.listdir(cur) + except OSError: + Logs.warn('Could not read %r', cur) + return + + if Options.lockfile in lst: + f = os.path.join(cur, Options.lockfile) + try: + env = ConfigSet.ConfigSet(f) + except EnvironmentError: + Logs.warn('Could not read %r', f) + return + + if not env.out_dir or not env.top_dir: + Logs.warn('Invalid lock file %r', f) + return + + if env.out_dir == env.top_dir: + distclean_dir(env.out_dir) + else: + remove_and_log(env.out_dir, shutil.rmtree) + + env_dirs = [env.out_dir] + if not (os.environ.get('NO_LOCK_IN_TOP') or ctx.options.no_lock_in_top): + env_dirs.append(env.top_dir) + if not (os.environ.get('NO_LOCK_IN_RUN') or ctx.options.no_lock_in_run): + env_dirs.append(env.run_dir) + for k in env_dirs: + p = os.path.join(k, Options.lockfile) + remove_and_log(p, os.remove) + +class Dist(Context.Context): + '''creates an archive containing the project source code''' + cmd = 'dist' + fun = 'dist' + algo = 'tar.bz2' + ext_algo = {} + + def execute(self): + """ + See :py:func:`waflib.Context.Context.execute` + """ + self.recurse([os.path.dirname(Context.g_module.root_path)]) + self.archive() + + def archive(self): + """ + Creates the source archive. + """ + import tarfile + + arch_name = self.get_arch_name() + + try: + self.base_path + except AttributeError: + self.base_path = self.path + + node = self.base_path.make_node(arch_name) + try: + node.delete() + except OSError: + pass + + files = self.get_files() + + if self.algo.startswith('tar.'): + tar = tarfile.open(node.abspath(), 'w:' + self.algo.replace('tar.', '')) + + for x in files: + self.add_tar_file(x, tar) + tar.close() + elif self.algo == 'zip': + import zipfile + zip = zipfile.ZipFile(node.abspath(), 'w', compression=zipfile.ZIP_DEFLATED) + + for x in files: + archive_name = self.get_base_name() + '/' + x.path_from(self.base_path) + if os.environ.get('SOURCE_DATE_EPOCH'): + # TODO: parse that timestamp + zip.writestr(zipfile.ZipInfo(archive_name), x.read(), zipfile.ZIP_DEFLATED) + else: + zip.write(x.abspath(), archive_name, zipfile.ZIP_DEFLATED) + zip.close() + else: + self.fatal('Valid algo types are tar.bz2, tar.gz, tar.xz or zip') + + try: + from hashlib import sha256 + except ImportError: + digest = '' + else: + digest = ' (sha256=%r)' % sha256(node.read(flags='rb')).hexdigest() + + Logs.info('New archive created: %s%s', self.arch_name, digest) + + def get_tar_path(self, node): + """ + Return the path to use for a node in the tar archive, the purpose of this + is to let subclases resolve symbolic links or to change file names + + :return: absolute path + :rtype: string + """ + return node.abspath() + + def add_tar_file(self, x, tar): + """ + Adds a file to the tar archive. Symlinks are not verified. 
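# Illustrative sketch, not part of the patch: add_tar_file() pins uid/gid to root
# and honours SOURCE_DATE_EPOCH, so reproducible source tarballs can be requested
# from the shell (the archive name depends on APPNAME/VERSION):
#
#	SOURCE_DATE_EPOCH=1000000000 ./waf dist
#
# Every entry in the resulting archive then carries the same fixed mtime.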
+ + :param x: file path + :param tar: tar file object + """ + p = self.get_tar_path(x) + tinfo = tar.gettarinfo(name=p, arcname=self.get_tar_prefix() + '/' + x.path_from(self.base_path)) + tinfo.uid = 0 + tinfo.gid = 0 + tinfo.uname = 'root' + tinfo.gname = 'root' + if os.environ.get('SOURCE_DATE_EPOCH'): + tinfo.mtime = int(os.environ.get('SOURCE_DATE_EPOCH')) + + if os.path.isfile(p): + with open(p, 'rb') as f: + tar.addfile(tinfo, fileobj=f) + else: + tar.addfile(tinfo) + + def get_tar_prefix(self): + """ + Returns the base path for files added into the archive tar file + + :rtype: string + """ + try: + return self.tar_prefix + except AttributeError: + return self.get_base_name() + + def get_arch_name(self): + """ + Returns the archive file name. + Set the attribute *arch_name* to change the default value:: + + def dist(ctx): + ctx.arch_name = 'ctx.tar.bz2' + + :rtype: string + """ + try: + self.arch_name + except AttributeError: + self.arch_name = self.get_base_name() + '.' + self.ext_algo.get(self.algo, self.algo) + return self.arch_name + + def get_base_name(self): + """ + Returns the default name of the main directory in the archive, which is set to *appname-version*. + Set the attribute *base_name* to change the default value:: + + def dist(ctx): + ctx.base_name = 'files' + + :rtype: string + """ + try: + self.base_name + except AttributeError: + appname = getattr(Context.g_module, Context.APPNAME, 'noname') + version = getattr(Context.g_module, Context.VERSION, '1.0') + self.base_name = appname + '-' + version + return self.base_name + + def get_excl(self): + """ + Returns the patterns to exclude for finding the files in the top-level directory. + Set the attribute *excl* to change the default value:: + + def dist(ctx): + ctx.excl = 'build **/*.o **/*.class' + + :rtype: string + """ + try: + return self.excl + except AttributeError: + self.excl = Node.exclude_regs + ' **/waf-2.* **/.waf-2.* **/waf3-2.* **/.waf3-2.* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*' + if Context.out_dir: + nd = self.root.find_node(Context.out_dir) + if nd: + self.excl += ' ' + nd.path_from(self.base_path) + return self.excl + + def get_files(self): + """ + Files to package are searched automatically by :py:func:`waflib.Node.Node.ant_glob`. 
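# Illustrative sketch, not part of the patch: the attributes documented above are
# usually set in the project's dist function; the values below are placeholders.
def dist(ctx):
	ctx.algo = 'tar.gz'                  # pick another Dist.algo compression
	ctx.base_name = 'myproj-snapshot'    # replaces the appname-version default
	ctx.excl = '**/.git **/*.pyc build'  # note: replaces the default exclusion list entirely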
+ Set *files* to prevent this behaviour:: + + def dist(ctx): + ctx.files = ctx.path.find_node('wscript') + + Files are also searched from the directory 'base_path', to change it, set:: + + def dist(ctx): + ctx.base_path = path + + :rtype: list of :py:class:`waflib.Node.Node` + """ + try: + files = self.files + except AttributeError: + files = self.base_path.ant_glob('**/*', excl=self.get_excl()) + return files + +def dist(ctx): + '''makes a tarball for redistributing the sources''' + pass + +class DistCheck(Dist): + """creates an archive with dist, then tries to build it""" + fun = 'distcheck' + cmd = 'distcheck' + + def execute(self): + """ + See :py:func:`waflib.Context.Context.execute` + """ + self.recurse([os.path.dirname(Context.g_module.root_path)]) + self.archive() + self.check() + + def make_distcheck_cmd(self, tmpdir): + cfg = [] + if Options.options.distcheck_args: + cfg = shlex.split(Options.options.distcheck_args) + else: + cfg = [x for x in sys.argv if x.startswith('-')] + cmd = [sys.executable, sys.argv[0], 'configure', 'build', 'install', 'uninstall', '--destdir=' + tmpdir] + cfg + return cmd + + def check(self): + """ + Creates the archive, uncompresses it and tries to build the project + """ + import tempfile, tarfile + + with tarfile.open(self.get_arch_name()) as t: + for x in t: + t.extract(x) + + instdir = tempfile.mkdtemp('.inst', self.get_base_name()) + cmd = self.make_distcheck_cmd(instdir) + ret = Utils.subprocess.Popen(cmd, cwd=self.get_base_name()).wait() + if ret: + raise Errors.WafError('distcheck failed with code %r' % ret) + + if os.path.exists(instdir): + raise Errors.WafError('distcheck succeeded, but files were left in %s' % instdir) + + shutil.rmtree(self.get_base_name()) + + +def distcheck(ctx): + '''checks if the project compiles (tarball from 'dist')''' + pass + +def autoconfigure(execute_method): + """ + Decorator that enables context commands to run *configure* as needed. + """ + def execute(self): + """ + Wraps :py:func:`waflib.Context.Context.execute` on the context class + """ + if not Configure.autoconfig: + return execute_method(self) + + env = ConfigSet.ConfigSet() + do_config = False + try: + env.load(os.path.join(Context.top_dir, Options.lockfile)) + except EnvironmentError: + Logs.warn('Configuring the project') + do_config = True + else: + if env.run_dir != Context.run_dir: + do_config = True + else: + h = 0 + for f in env.files: + try: + h = Utils.h_list((h, Utils.readf(f, 'rb'))) + except EnvironmentError: + do_config = True + break + else: + do_config = h != env.hash + + if do_config: + cmd = env.config_cmd or 'configure' + if Configure.autoconfig == 'clobber': + tmp = Options.options.__dict__ + launch_dir_tmp = Context.launch_dir + if env.options: + Options.options.__dict__ = env.options + Context.launch_dir = env.launch_dir + try: + run_command(cmd) + finally: + Options.options.__dict__ = tmp + Context.launch_dir = launch_dir_tmp + else: + run_command(cmd) + run_command(self.cmd) + else: + return execute_method(self) + return execute +Build.BuildContext.execute = autoconfigure(Build.BuildContext.execute) + diff --git a/third_party/waf/waflib/Task.py b/third_party/waf/waflib/Task.py new file mode 100644 index 0000000..cb49a73 --- /dev/null +++ b/third_party/waf/waflib/Task.py @@ -0,0 +1,1406 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Tasks represent atomic operations such as processes. 
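# Illustrative sketch, not part of the patch: the autoconfigure() wrapper above is
# inert unless Configure.autoconfig is set, typically at the top of a wscript:
#
#	from waflib import Configure
#	Configure.autoconfig = True        # re-run 'configure' when tracked wscript files change
#	# Configure.autoconfig = 'clobber' # additionally replay the options of the original configure
#
# With this set, `waf build` after editing a wscript triggers configure first.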
+""" + +import os, re, sys, tempfile, traceback +from waflib import Utils, Logs, Errors + +# task states +NOT_RUN = 0 +"""The task was not executed yet""" + +MISSING = 1 +"""The task has been executed but the files have not been created""" + +CRASHED = 2 +"""The task execution returned a non-zero exit status""" + +EXCEPTION = 3 +"""An exception occurred in the task execution""" + +CANCELED = 4 +"""A dependency for the task is missing so it was cancelled""" + +SKIPPED = 8 +"""The task did not have to be executed""" + +SUCCESS = 9 +"""The task was successfully executed""" + +ASK_LATER = -1 +"""The task is not ready to be executed""" + +SKIP_ME = -2 +"""The task does not need to be executed""" + +RUN_ME = -3 +"""The task must be executed""" + +CANCEL_ME = -4 +"""The task cannot be executed because of a dependency problem""" + +COMPILE_TEMPLATE_SHELL = ''' +def f(tsk): + env = tsk.env + gen = tsk.generator + bld = gen.bld + cwdx = tsk.get_cwd() + p = env.get_flat + def to_list(xx): + if isinstance(xx, str): return [xx] + return xx + tsk.last_cmd = cmd = \'\'\' %s \'\'\' % s + return tsk.exec_command(cmd, cwd=cwdx, env=env.env or None) +''' + +COMPILE_TEMPLATE_NOSHELL = ''' +def f(tsk): + env = tsk.env + gen = tsk.generator + bld = gen.bld + cwdx = tsk.get_cwd() + def to_list(xx): + if isinstance(xx, str): return [xx] + return xx + def merge(lst1, lst2): + if lst1 and lst2: + return lst1[:-1] + [lst1[-1] + lst2[0]] + lst2[1:] + return lst1 + lst2 + lst = [] + %s + if '' in lst: + lst = [x for x in lst if x] + tsk.last_cmd = lst + return tsk.exec_command(lst, cwd=cwdx, env=env.env or None) +''' + +COMPILE_TEMPLATE_SIG_VARS = ''' +def f(tsk): + sig = tsk.generator.bld.hash_env_vars(tsk.env, tsk.vars) + tsk.m.update(sig) + env = tsk.env + gen = tsk.generator + bld = gen.bld + cwdx = tsk.get_cwd() + p = env.get_flat + buf = [] + %s + tsk.m.update(repr(buf).encode()) +''' + +classes = {} +""" +The metaclass :py:class:`waflib.Task.store_task_type` stores all class tasks +created by user scripts or Waf tools to this dict. It maps class names to class objects. +""" + +class store_task_type(type): + """ + Metaclass: store the task classes into the dict pointed by the + class attribute 'register' which defaults to :py:const:`waflib.Task.classes`, + + The attribute 'run_str' is compiled into a method 'run' bound to the task class. + """ + def __init__(cls, name, bases, dict): + super(store_task_type, cls).__init__(name, bases, dict) + name = cls.__name__ + + if name != 'evil' and name != 'Task': + if getattr(cls, 'run_str', None): + # if a string is provided, convert it to a method + (f, dvars) = compile_fun(cls.run_str, cls.shell) + cls.hcode = Utils.h_cmd(cls.run_str) + cls.orig_run_str = cls.run_str + # change the name of run_str or it is impossible to subclass with a function + cls.run_str = None + cls.run = f + # process variables + cls.vars = list(set(cls.vars + dvars)) + cls.vars.sort() + if cls.vars: + fun = compile_sig_vars(cls.vars) + if fun: + cls.sig_vars = fun + elif getattr(cls, 'run', None) and not 'hcode' in cls.__dict__: + # getattr(cls, 'hcode') would look in the upper classes + cls.hcode = Utils.h_cmd(cls.run) + + # be creative + getattr(cls, 'register', classes)[name] = cls + +evil = store_task_type('evil', (object,), {}) +"Base class provided to avoid writing a metaclass, so the code can run in python 2.6 and 3.x unmodified" + +class Task(evil): + """ + Task objects represents actions to perform such as commands to execute by calling the `run` method. 
+ + Detecting when to execute a task occurs in the method :py:meth:`waflib.Task.Task.runnable_status`. + + Detecting which tasks to execute is performed through a hash value returned by + :py:meth:`waflib.Task.Task.signature`. The task signature is persistent from build to build. + """ + vars = [] + """ConfigSet variables that should trigger a rebuild (class attribute used for :py:meth:`waflib.Task.Task.sig_vars`)""" + + always_run = False + """Specify whether task instances must always be executed or not (class attribute)""" + + shell = False + """Execute the command with the shell (class attribute)""" + + color = 'GREEN' + """Color for the console display, see :py:const:`waflib.Logs.colors_lst`""" + + ext_in = [] + """File extensions that objects of this task class may use""" + + ext_out = [] + """File extensions that objects of this task class may create""" + + before = [] + """The instances of this class are executed before the instances of classes whose names are in this list""" + + after = [] + """The instances of this class are executed after the instances of classes whose names are in this list""" + + hcode = Utils.SIG_NIL + """String representing an additional hash for the class representation""" + + keep_last_cmd = False + """Whether to keep the last command executed on the instance after execution. + This may be useful for certain extensions but it can use a lot of memory. + """ + + weight = 0 + """Optional weight to tune the priority for task instances. + The higher, the earlier. The weight only applies to single task objects.""" + + tree_weight = 0 + """Optional weight to tune the priority of task instances and whole subtrees. + The higher, the earlier.""" + + prio_order = 0 + """Priority order set by the scheduler on instances during the build phase. + You most likely do not need to set it.
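# Illustrative sketch, not part of the patch: nudging the scheduler with the
# weight attribute documented above (LINK_CC is used as a placeholder variable).
from waflib import Task

class slow_link(Task.Task):
	run_str = '${LINK_CC} ${SRC} -o ${TGT}'
	weight = 50	# higher weight -> dispatched earlier among ready tasks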
+ """ + + __slots__ = ('hasrun', 'generator', 'env', 'inputs', 'outputs', 'dep_nodes', 'run_after') + + def __init__(self, *k, **kw): + self.hasrun = NOT_RUN + try: + self.generator = kw['generator'] + except KeyError: + self.generator = self + + self.env = kw['env'] + """:py:class:`waflib.ConfigSet.ConfigSet` object (make sure to provide one)""" + + self.inputs = [] + """List of input nodes, which represent the files used by the task instance""" + + self.outputs = [] + """List of output nodes, which represent the files created by the task instance""" + + self.dep_nodes = [] + """List of additional nodes to depend on""" + + self.run_after = set() + """Set of tasks that must be executed before this one""" + + def __lt__(self, other): + return self.priority() > other.priority() + def __le__(self, other): + return self.priority() >= other.priority() + def __gt__(self, other): + return self.priority() < other.priority() + def __ge__(self, other): + return self.priority() <= other.priority() + + def get_cwd(self): + """ + :return: current working directory + :rtype: :py:class:`waflib.Node.Node` + """ + bld = self.generator.bld + ret = getattr(self, 'cwd', None) or getattr(bld, 'cwd', bld.bldnode) + if isinstance(ret, str): + if os.path.isabs(ret): + ret = bld.root.make_node(ret) + else: + ret = self.generator.path.make_node(ret) + return ret + + def quote_flag(self, x): + """ + Surround a process argument by quotes so that a list of arguments can be written to a file + + :param x: flag + :type x: string + :return: quoted flag + :rtype: string + """ + old = x + if '\\' in x: + x = x.replace('\\', '\\\\') + if '"' in x: + x = x.replace('"', '\\"') + if old != x or ' ' in x or '\t' in x or "'" in x: + x = '"%s"' % x + return x + + def priority(self): + """ + Priority of execution; the higher, the earlier + + :return: the priority value + :rtype: a tuple of numeric values + """ + return (self.weight + self.prio_order, - getattr(self.generator, 'tg_idx_count', 0)) + + def split_argfile(self, cmd): + """ + Splits a list of process commands into the executable part and its list of arguments + + :return: a tuple containing the executable first and then the rest of arguments + :rtype: tuple + """ + return ([cmd[0]], [self.quote_flag(x) for x in cmd[1:]]) + + def exec_command(self, cmd, **kw): + """ + Wrapper for :py:meth:`waflib.Context.Context.exec_command`. + This version set the current working directory (``build.variant_dir``), + applies PATH settings (if self.env.PATH is provided), and can run long + commands through a temporary ``@argfile``. + + :param cmd: process command to execute + :type cmd: list of string (best) or string (process will use a shell) + :return: the return code + :rtype: int + + Optional parameters: + + #. cwd: current working directory (Node or string) + #. stdout: set to None to prevent waf from capturing the process standard output + #. stderr: set to None to prevent waf from capturing the process standard error + #. 
timeout: timeout value (Python 3) + """ + if not 'cwd' in kw: + kw['cwd'] = self.get_cwd() + + if hasattr(self, 'timeout'): + kw['timeout'] = self.timeout + + if self.env.PATH: + env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) + env['PATH'] = self.env.PATH if isinstance(self.env.PATH, str) else os.pathsep.join(self.env.PATH) + + if hasattr(self, 'stdout'): + kw['stdout'] = self.stdout + if hasattr(self, 'stderr'): + kw['stderr'] = self.stderr + + if not isinstance(cmd, str): + if Utils.is_win32: + # win32 compares the resulting length http://support.microsoft.com/kb/830473 + too_long = sum([len(arg) for arg in cmd]) + len(cmd) > 8192 + else: + # non-win32 counts the amount of arguments (200k) + too_long = len(cmd) > 200000 + + if too_long and getattr(self, 'allow_argsfile', True): + # Shunt arguments to a temporary file if the command is too long. + cmd, args = self.split_argfile(cmd) + try: + (fd, tmp) = tempfile.mkstemp() + os.write(fd, '\r\n'.join(args).encode()) + os.close(fd) + if Logs.verbose: + Logs.debug('argfile: @%r -> %r', tmp, args) + return self.generator.bld.exec_command(cmd + ['@' + tmp], **kw) + finally: + try: + os.remove(tmp) + except OSError: + # anti-virus and indexers can keep files open -_- + pass + return self.generator.bld.exec_command(cmd, **kw) + + def process(self): + """ + Runs the task and handles errors + + :return: 0 or None if everything is fine + :rtype: integer + """ + # remove the task signature immediately before it is executed + # so that the task will be executed again in case of failure + try: + del self.generator.bld.task_sigs[self.uid()] + except KeyError: + pass + + try: + ret = self.run() + except Exception: + self.err_msg = traceback.format_exc() + self.hasrun = EXCEPTION + else: + if ret: + self.err_code = ret + self.hasrun = CRASHED + else: + try: + self.post_run() + except Errors.WafError: + pass + except Exception: + self.err_msg = traceback.format_exc() + self.hasrun = EXCEPTION + else: + self.hasrun = SUCCESS + + if self.hasrun != SUCCESS and self.scan: + # rescan dependencies on next run + try: + del self.generator.bld.imp_sigs[self.uid()] + except KeyError: + pass + + def log_display(self, bld): + "Writes the execution status on the context logger" + if self.generator.bld.progress_bar == 3: + return + + s = self.display() + if s: + if bld.logger: + logger = bld.logger + else: + logger = Logs + + if self.generator.bld.progress_bar == 1: + c1 = Logs.colors.cursor_off + c2 = Logs.colors.cursor_on + logger.info(s, extra={'stream': sys.stderr, 'terminator':'', 'c1': c1, 'c2' : c2}) + else: + logger.info(s, extra={'terminator':'', 'c1': '', 'c2' : ''}) + + def display(self): + """ + Returns an execution status for the console, the progress bar, or the IDE output. 
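# Illustrative sketch, not part of the patch: exec_command() above honours several
# optional per-instance attributes ('mytool', tgen and the nodes are placeholders).
tsk = tgen.create_task('mytool', src_node, tgt_node)
tsk.timeout = 60            # forwarded to the subprocess call (Python 3)
tsk.stdout = None           # do not capture standard output
tsk.allow_argsfile = False  # never shunt long command lines to an @argfile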
+ + :rtype: string + """ + col1 = Logs.colors(self.color) + col2 = Logs.colors.NORMAL + master = self.generator.bld.producer + + def cur(): + # the current task position, computed as late as possible + return master.processed - master.ready.qsize() + + if self.generator.bld.progress_bar == 1: + return self.generator.bld.progress_line(cur(), master.total, col1, col2) + + if self.generator.bld.progress_bar == 2: + ela = str(self.generator.bld.timer) + try: + ins = ','.join([n.name for n in self.inputs]) + except AttributeError: + ins = '' + try: + outs = ','.join([n.name for n in self.outputs]) + except AttributeError: + outs = '' + return '|Total %s|Current %s|Inputs %s|Outputs %s|Time %s|\n' % (master.total, cur(), ins, outs, ela) + + s = str(self) + if not s: + return None + + total = master.total + n = len(str(total)) + fs = '[%%%dd/%%%dd] %%s%%s%%s%%s\n' % (n, n) + kw = self.keyword() + if kw: + kw += ' ' + return fs % (cur(), total, kw, col1, s, col2) + + def hash_constraints(self): + """ + Identifies a task type for all the constraints relevant for the scheduler: precedence, file production + + :return: a hash value + :rtype: string + """ + return (tuple(self.before), tuple(self.after), tuple(self.ext_in), tuple(self.ext_out), self.__class__.__name__, self.hcode) + + def format_error(self): + """ + Returns an error message to display the build failure reasons + + :rtype: string + """ + if Logs.verbose: + msg = ': %r\n%r' % (self, getattr(self, 'last_cmd', '')) + else: + msg = ' (run with -v to display more information)' + name = getattr(self.generator, 'name', '') + if getattr(self, "err_msg", None): + return self.err_msg + elif not self.hasrun: + return 'task in %r was not executed for some reason: %r' % (name, self) + elif self.hasrun == CRASHED: + try: + return ' -> task in %r failed with exit status %r%s' % (name, self.err_code, msg) + except AttributeError: + return ' -> task in %r failed%s' % (name, msg) + elif self.hasrun == MISSING: + return ' -> missing files in %r%s' % (name, msg) + elif self.hasrun == CANCELED: + return ' -> %r canceled because of missing dependencies' % name + else: + return 'invalid status for task in %r: %r' % (name, self.hasrun) + + def colon(self, var1, var2): + """ + Enable scriptlet expressions of the form ${FOO_ST:FOO} + If the first variable (FOO_ST) is empty, then an empty list is returned + + The results will be slightly different if FOO_ST is a list, for example:: + + env.FOO = ['p1', 'p2'] + env.FOO_ST = '-I%s' + # ${FOO_ST:FOO} returns + ['-Ip1', '-Ip2'] + + env.FOO_ST = ['-a', '-b'] + # ${FOO_ST:FOO} returns + ['-a', '-b', 'p1', '-a', '-b', 'p2'] + """ + tmp = self.env[var1] + if not tmp: + return [] + + if isinstance(var2, str): + it = self.env[var2] + else: + it = var2 + if isinstance(tmp, str): + return [tmp % x for x in it] + else: + lst = [] + for y in it: + lst.extend(tmp) + lst.append(y) + return lst + + def __str__(self): + "string to display to the user" + name = self.__class__.__name__ + if self.outputs: + if name.endswith(('lib', 'program')) or not self.inputs: + node = self.outputs[0] + return node.path_from(node.ctx.launch_node()) + if not (self.inputs or self.outputs): + return self.__class__.__name__ + if len(self.inputs) == 1: + node = self.inputs[0] + return node.path_from(node.ctx.launch_node()) + + src_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.inputs]) + tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs]) + if self.outputs: + sep = ' -> ' + else: + sep = '' + return '%s: %s%s%s' 
% (self.__class__.__name__, src_str, sep, tgt_str) + + def keyword(self): + "Display keyword used to prettify the console outputs" + name = self.__class__.__name__ + if name.endswith(('lib', 'program')): + return 'Linking' + if len(self.inputs) == 1 and len(self.outputs) == 1: + return 'Compiling' + if not self.inputs: + if self.outputs: + return 'Creating' + else: + return 'Running' + return 'Processing' + + def __repr__(self): + "for debugging purposes" + try: + ins = ",".join([x.name for x in self.inputs]) + outs = ",".join([x.name for x in self.outputs]) + except AttributeError: + ins = ",".join([str(x) for x in self.inputs]) + outs = ",".join([str(x) for x in self.outputs]) + return "".join(['\n\t{task %r: ' % id(self), self.__class__.__name__, " ", ins, " -> ", outs, '}']) + + def uid(self): + """ + Returns an identifier used to determine if tasks are up-to-date. Since the + identifier will be stored between executions, it must be: + + - unique for a task: no two tasks return the same value (for a given build context) + - the same for a given task instance + + By default, the node paths, the class name, and the function are used + as inputs to compute a hash. + + The pointer to the object (python built-in 'id') will change between build executions, + and must be avoided in such hashes. + + :return: hash value + :rtype: string + """ + try: + return self.uid_ + except AttributeError: + m = Utils.md5(self.__class__.__name__) + up = m.update + for x in self.inputs + self.outputs: + up(x.abspath()) + self.uid_ = m.digest() + return self.uid_ + + def set_inputs(self, inp): + """ + Appends the nodes to the *inputs* list + + :param inp: input nodes + :type inp: node or list of nodes + """ + if isinstance(inp, list): + self.inputs += inp + else: + self.inputs.append(inp) + + def set_outputs(self, out): + """ + Appends the nodes to the *outputs* list + + :param out: output nodes + :type out: node or list of nodes + """ + if isinstance(out, list): + self.outputs += out + else: + self.outputs.append(out) + + def set_run_after(self, task): + """ + Run this task only after the given *task*. + + Calling this method from :py:meth:`waflib.Task.Task.runnable_status` may cause + build deadlocks; see :py:meth:`waflib.Tools.fc.fc.runnable_status` for details. + + :param task: task + :type task: :py:class:`waflib.Task.Task` + """ + assert isinstance(task, Task) + self.run_after.add(task) + + def signature(self): + """ + Task signatures are stored between build executions, they are use to track the changes + made to the input nodes (not to the outputs!). 
The signature hashes data from various sources: + + * explicit dependencies: files listed in the inputs (list of node objects) :py:meth:`waflib.Task.Task.sig_explicit_deps` + * implicit dependencies: list of nodes returned by scanner methods (when present) :py:meth:`waflib.Task.Task.sig_implicit_deps` + * hashed data: variables/values read from task.vars/task.env :py:meth:`waflib.Task.Task.sig_vars` + + If the signature is expected to give a different result, clear the cache kept in ``self.cache_sig``:: + + from waflib import Task + class cls(Task.Task): + def signature(self): + sig = super(Task.Task, self).signature() + delattr(self, 'cache_sig') + return super(Task.Task, self).signature() + + :return: the signature value + :rtype: string or bytes + """ + try: + return self.cache_sig + except AttributeError: + pass + + self.m = Utils.md5(self.hcode) + + # explicit deps + self.sig_explicit_deps() + + # env vars + self.sig_vars() + + # implicit deps / scanner results + if self.scan: + try: + self.sig_implicit_deps() + except Errors.TaskRescan: + return self.signature() + + ret = self.cache_sig = self.m.digest() + return ret + + def runnable_status(self): + """ + Returns the Task status + + :return: a task state in :py:const:`waflib.Task.RUN_ME`, + :py:const:`waflib.Task.SKIP_ME`, :py:const:`waflib.Task.CANCEL_ME` or :py:const:`waflib.Task.ASK_LATER`. + :rtype: int + """ + bld = self.generator.bld + if bld.is_install < 0: + return SKIP_ME + + for t in self.run_after: + if not t.hasrun: + return ASK_LATER + elif t.hasrun < SKIPPED: + # a dependency has an error + return CANCEL_ME + + # first compute the signature + try: + new_sig = self.signature() + except Errors.TaskNotReady: + return ASK_LATER + + # compare the signature to a signature computed previously + key = self.uid() + try: + prev_sig = bld.task_sigs[key] + except KeyError: + Logs.debug('task: task %r must run: it was never run before or the task code changed', self) + return RUN_ME + + if new_sig != prev_sig: + Logs.debug('task: task %r must run: the task signature changed', self) + return RUN_ME + + # compare the signatures of the outputs + for node in self.outputs: + sig = bld.node_sigs.get(node) + if not sig: + Logs.debug('task: task %r must run: an output node has no signature', self) + return RUN_ME + if sig != key: + Logs.debug('task: task %r must run: an output node was produced by another task', self) + return RUN_ME + if not node.exists(): + Logs.debug('task: task %r must run: an output node does not exist', self) + return RUN_ME + + return (self.always_run and RUN_ME) or SKIP_ME + + def post_run(self): + """ + Called after successful execution to record that the task has run by + updating the entry in :py:attr:`waflib.Build.BuildContext.task_sigs`. + """ + bld = self.generator.bld + for node in self.outputs: + if not node.exists(): + self.hasrun = MISSING + self.err_msg = '-> missing file: %r' % node.abspath() + raise Errors.WafError(self.err_msg) + bld.node_sigs[node] = self.uid() # make sure this task produced the files in question + bld.task_sigs[self.uid()] = self.signature() + if not self.keep_last_cmd: + try: + del self.last_cmd + except AttributeError: + pass + + def sig_explicit_deps(self): + """ + Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.inputs` + and :py:attr:`waflib.Task.Task.dep_nodes` signatures. 
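# Illustrative sketch, not part of the patch: given the final line of
# runnable_status() above, setting always_run forces RUN_ME on every build.
from waflib import Task

class regen_stamp(Task.Task):
	always_run = True	# executed on each build even when signatures match
	run_str = 'echo stamp > ${TGT}'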
+ """ + bld = self.generator.bld + upd = self.m.update + + # the inputs + for x in self.inputs + self.dep_nodes: + upd(x.get_bld_sig()) + + # manual dependencies, they can slow down the builds + if bld.deps_man: + additional_deps = bld.deps_man + for x in self.inputs + self.outputs: + try: + d = additional_deps[x] + except KeyError: + continue + + for v in d: + try: + v = v.get_bld_sig() + except AttributeError: + if hasattr(v, '__call__'): + v = v() # dependency is a function, call it + upd(v) + + def sig_deep_inputs(self): + """ + Enable rebuilds on input files task signatures. Not used by default. + + Example: hashes of output programs can be unchanged after being re-linked, + despite the libraries being different. This method can thus prevent stale unit test + results (waf_unit_test.py). + + Hashing input file timestamps is another possibility for the implementation. + This may cause unnecessary rebuilds when input tasks are frequently executed. + Here is an implementation example:: + + lst = [] + for node in self.inputs + self.dep_nodes: + st = os.stat(node.abspath()) + lst.append(st.st_mtime) + lst.append(st.st_size) + self.m.update(Utils.h_list(lst)) + + The downside of the implementation is that it absolutely requires all build directory + files to be declared within the current build. + """ + bld = self.generator.bld + lst = [bld.task_sigs[bld.node_sigs[node]] for node in (self.inputs + self.dep_nodes) if node.is_bld()] + self.m.update(Utils.h_list(lst)) + + def sig_vars(self): + """ + Used by :py:meth:`waflib.Task.Task.signature`; it hashes :py:attr:`waflib.Task.Task.env` variables/values + When overriding this method, and if scriptlet expressions are used, make sure to follow + the code in :py:meth:`waflib.Task.Task.compile_sig_vars` to enable dependencies on scriptlet results. + + This method may be replaced on subclasses by the metaclass to force dependencies on scriptlet code. + """ + sig = self.generator.bld.hash_env_vars(self.env, self.vars) + self.m.update(sig) + + scan = None + """ + This method, when provided, returns a tuple containing: + + * a list of nodes corresponding to real files + * a list of names for files not found in path_lst + + For example:: + + from waflib.Task import Task + class mytask(Task): + def scan(self, node): + return ([], []) + + The first and second lists in the tuple are stored in :py:attr:`waflib.Build.BuildContext.node_deps` and + :py:attr:`waflib.Build.BuildContext.raw_deps` respectively. + """ + + def sig_implicit_deps(self): + """ + Used by :py:meth:`waflib.Task.Task.signature`; it hashes node signatures + obtained by scanning for dependencies (:py:meth:`waflib.Task.Task.scan`). + + The exception :py:class:`waflib.Errors.TaskRescan` is thrown + when a file has changed. In this case, the method :py:meth:`waflib.Task.Task.signature` is called + once again, and return here to call :py:meth:`waflib.Task.Task.scan` and searching for dependencies. 
+ """ + bld = self.generator.bld + + # get the task signatures from previous runs + key = self.uid() + prev = bld.imp_sigs.get(key, []) + + # for issue #379 + if prev: + try: + if prev == self.compute_sig_implicit_deps(): + return prev + except Errors.TaskNotReady: + raise + except EnvironmentError: + # when a file was renamed, remove the stale nodes (headers in folders without source files) + # this will break the order calculation for headers created during the build in the source directory (should be uncommon) + # the behaviour will differ when top != out + for x in bld.node_deps.get(self.uid(), []): + if not x.is_bld() and not x.exists(): + try: + del x.parent.children[x.name] + except KeyError: + pass + del bld.imp_sigs[key] + raise Errors.TaskRescan('rescan') + + # no previous run or the signature of the dependencies has changed, rescan the dependencies + (bld.node_deps[key], bld.raw_deps[key]) = self.scan() + if Logs.verbose: + Logs.debug('deps: scanner for %s: %r; unresolved: %r', self, bld.node_deps[key], bld.raw_deps[key]) + + # recompute the signature and return it + try: + bld.imp_sigs[key] = self.compute_sig_implicit_deps() + except EnvironmentError: + for k in bld.node_deps.get(self.uid(), []): + if not k.exists(): + Logs.warn('Dependency %r for %r is missing: check the task declaration and the build order!', k, self) + raise + + def compute_sig_implicit_deps(self): + """ + Used by :py:meth:`waflib.Task.Task.sig_implicit_deps` for computing the actual hash of the + :py:class:`waflib.Node.Node` returned by the scanner. + + :return: a hash value for the implicit dependencies + :rtype: string or bytes + """ + upd = self.m.update + self.are_implicit_nodes_ready() + + # scanner returns a node that does not have a signature + # just *ignore* the error and let them figure out from the compiler output + # waf -k behaviour + for k in self.generator.bld.node_deps.get(self.uid(), []): + upd(k.get_bld_sig()) + return self.m.digest() + + def are_implicit_nodes_ready(self): + """ + For each node returned by the scanner, see if there is a task that creates it, + and infer the build order + + This has a low performance impact on null builds (1.86s->1.66s) thanks to caching (28s->1.86s) + """ + bld = self.generator.bld + try: + cache = bld.dct_implicit_nodes + except AttributeError: + bld.dct_implicit_nodes = cache = {} + + # one cache per build group + try: + dct = cache[bld.current_group] + except KeyError: + dct = cache[bld.current_group] = {} + for tsk in bld.cur_tasks: + for x in tsk.outputs: + dct[x] = tsk + + modified = False + for x in bld.node_deps.get(self.uid(), []): + if x in dct: + self.run_after.add(dct[x]) + modified = True + + if modified: + for tsk in self.run_after: + if not tsk.hasrun: + #print "task is not ready..." 
+ raise Errors.TaskNotReady('not ready') +if sys.hexversion > 0x3000000: + def uid(self): + try: + return self.uid_ + except AttributeError: + m = Utils.md5(self.__class__.__name__.encode('latin-1', 'xmlcharrefreplace')) + up = m.update + for x in self.inputs + self.outputs: + up(x.abspath().encode('latin-1', 'xmlcharrefreplace')) + self.uid_ = m.digest() + return self.uid_ + uid.__doc__ = Task.uid.__doc__ + Task.uid = uid + +def is_before(t1, t2): + """ + Returns a non-zero value if task t1 is to be executed before task t2:: + + t1.ext_out = '.h' + t2.ext_in = '.h' + t2.after = ['t1'] + t1.before = ['t2'] + waflib.Task.is_before(t1, t2) # True + + :param t1: Task object + :type t1: :py:class:`waflib.Task.Task` + :param t2: Task object + :type t2: :py:class:`waflib.Task.Task` + """ + to_list = Utils.to_list + for k in to_list(t2.ext_in): + if k in to_list(t1.ext_out): + return 1 + + if t1.__class__.__name__ in to_list(t2.after): + return 1 + + if t2.__class__.__name__ in to_list(t1.before): + return 1 + + return 0 + +def set_file_constraints(tasks): + """ + Updates the ``run_after`` attribute of all tasks based on the task inputs and outputs + + :param tasks: tasks + :type tasks: list of :py:class:`waflib.Task.Task` + """ + ins = Utils.defaultdict(set) + outs = Utils.defaultdict(set) + for x in tasks: + for a in x.inputs: + ins[a].add(x) + for a in x.dep_nodes: + ins[a].add(x) + for a in x.outputs: + outs[a].add(x) + + links = set(ins.keys()).intersection(outs.keys()) + for k in links: + for a in ins[k]: + a.run_after.update(outs[k]) + + +class TaskGroup(object): + """ + Wrap nxm task order constraints into a single object + to prevent the creation of large list/set objects + + This is an optimization + """ + def __init__(self, prev, next): + self.prev = prev + self.next = next + self.done = False + + def get_hasrun(self): + for k in self.prev: + if not k.hasrun: + return NOT_RUN + return SUCCESS + + hasrun = property(get_hasrun, None) + +def set_precedence_constraints(tasks): + """ + Updates the ``run_after`` attribute of all tasks based on the after/before/ext_out/ext_in attributes + + :param tasks: tasks + :type tasks: list of :py:class:`waflib.Task.Task` + """ + cstr_groups = Utils.defaultdict(list) + for x in tasks: + h = x.hash_constraints() + cstr_groups[h].append(x) + + keys = list(cstr_groups.keys()) + maxi = len(keys) + + # this list should be short + for i in range(maxi): + t1 = cstr_groups[keys[i]][0] + for j in range(i + 1, maxi): + t2 = cstr_groups[keys[j]][0] + + # add the constraints based on the comparisons + if is_before(t1, t2): + a = i + b = j + elif is_before(t2, t1): + a = j + b = i + else: + continue + + a = cstr_groups[keys[a]] + b = cstr_groups[keys[b]] + + if len(a) < 2 or len(b) < 2: + for x in b: + x.run_after.update(a) + else: + group = TaskGroup(set(a), set(b)) + for x in b: + x.run_after.add(group) + +def funex(c): + """ + Compiles a scriptlet expression into a Python function + + :param c: function to compile + :type c: string + :return: the function 'f' declared in the input string + :rtype: function + """ + dc = {} + exec(c, dc) + return dc['f'] + +re_cond = re.compile(r'(?P<var>\w+)|(?P<or>\|)|(?P<and>&)') +re_novar = re.compile(r'^(SRC|TGT)\W+.*?$') +reg_act = re.compile(r'(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})', re.M) +def compile_fun_shell(line): + """ + Creates a compiled function to execute a process through a sub-shell + """ + extr = [] + def repl(match): + g = match.group + if g('dollar'): + return "$" + elif 
g('backslash'): + return '\\\\' + elif g('subst'): + extr.append((g('var'), g('code'))) + return "%s" + return None + line = reg_act.sub(repl, line) or line + dvars = [] + def add_dvar(x): + if x not in dvars: + dvars.append(x) + + def replc(m): + # performs substitutions and populates dvars + if m.group('and'): + return ' and ' + elif m.group('or'): + return ' or ' + else: + x = m.group('var') + add_dvar(x) + return 'env[%r]' % x + + parm = [] + app = parm.append + for (var, meth) in extr: + if var == 'SRC': + if meth: + app('tsk.inputs%s' % meth) + else: + app('" ".join([a.path_from(cwdx) for a in tsk.inputs])') + elif var == 'TGT': + if meth: + app('tsk.outputs%s' % meth) + else: + app('" ".join([a.path_from(cwdx) for a in tsk.outputs])') + elif meth: + if meth.startswith(':'): + add_dvar(var) + m = meth[1:] + if m == 'SRC': + m = '[a.path_from(cwdx) for a in tsk.inputs]' + elif m == 'TGT': + m = '[a.path_from(cwdx) for a in tsk.outputs]' + elif re_novar.match(m): + m = '[tsk.inputs%s]' % m[3:] + elif re_novar.match(m): + m = '[tsk.outputs%s]' % m[3:] + else: + add_dvar(m) + if m[:3] not in ('tsk', 'gen', 'bld'): + m = '%r' % m + app('" ".join(tsk.colon(%r, %s))' % (var, m)) + elif meth.startswith('?'): + # In A?B|C output env.A if one of env.B or env.C is non-empty + expr = re_cond.sub(replc, meth[1:]) + app('p(%r) if (%s) else ""' % (var, expr)) + else: + call = '%s%s' % (var, meth) + add_dvar(call) + app(call) + else: + add_dvar(var) + app("p('%s')" % var) + if parm: + parm = "%% (%s) " % (',\n\t\t'.join(parm)) + else: + parm = '' + + c = COMPILE_TEMPLATE_SHELL % (line, parm) + Logs.debug('action: %s', c.strip().splitlines()) + return (funex(c), dvars) + +reg_act_noshell = re.compile(r"(?P<space>\s+)|(?P<subst>\$\{(?P<var>\w+)(?P<code>.*?)\})|(?P<text>([^$ \t\n\r\f\v]|\$\$)+)", re.M) +def compile_fun_noshell(line): + """ + Creates a compiled function to execute a process without a sub-shell + """ + buf = [] + dvars = [] + merge = False + app = buf.append + + def add_dvar(x): + if x not in dvars: + dvars.append(x) + + def replc(m): + # performs substitutions and populates dvars + if m.group('and'): + return ' and ' + elif m.group('or'): + return ' or ' + else: + x = m.group('var') + add_dvar(x) + return 'env[%r]' % x + + for m in reg_act_noshell.finditer(line): + if m.group('space'): + merge = False + continue + elif m.group('text'): + app('[%r]' % m.group('text').replace('$$', '$')) + elif m.group('subst'): + var = m.group('var') + code = m.group('code') + if var == 'SRC': + if code: + app('[tsk.inputs%s]' % code) + else: + app('[a.path_from(cwdx) for a in tsk.inputs]') + elif var == 'TGT': + if code: + app('[tsk.outputs%s]' % code) + else: + app('[a.path_from(cwdx) for a in tsk.outputs]') + elif code: + if code.startswith(':'): + # a composed variable ${FOO:OUT} + add_dvar(var) + m = code[1:] + if m == 'SRC': + m = '[a.path_from(cwdx) for a in tsk.inputs]' + elif m == 'TGT': + m = '[a.path_from(cwdx) for a in tsk.outputs]' + elif re_novar.match(m): + m = '[tsk.inputs%s]' % m[3:] + elif re_novar.match(m): + m = '[tsk.outputs%s]' % m[3:] + else: + add_dvar(m) + if m[:3] not in ('tsk', 'gen', 'bld'): + m = '%r' % m + app('tsk.colon(%r, %s)' % (var, m)) + elif code.startswith('?'): + # In A?B|C output env.A if one of env.B or env.C is non-empty + expr = re_cond.sub(replc, code[1:]) + app('to_list(env[%r] if (%s) else [])' % (var, expr)) + else: + # plain code such as ${tsk.inputs[0].abspath()} + call = '%s%s' % (var, code) + add_dvar(call) + app('to_list(%s)' % call) + else: + # a plain 
variable such as ${AR} + app('to_list(env[%r])' % var) + add_dvar(var) + if merge: + tmp = 'merge(%s, %s)' % (buf[-2], buf[-1]) + del buf[-1] + buf[-1] = tmp + merge = True # next turn + + buf = ['lst.extend(%s)' % x for x in buf] + fun = COMPILE_TEMPLATE_NOSHELL % "\n\t".join(buf) + Logs.debug('action: %s', fun.strip().splitlines()) + return (funex(fun), dvars) + +def compile_fun(line, shell=False): + """ + Parses a string expression such as '${CC} ${SRC} -o ${TGT}' and returns a pair containing: + + * The function created (compiled) for use as :py:meth:`waflib.Task.Task.run` + * The list of variables that must cause rebuilds when *env* data is modified + + for example:: + + from waflib.Task import compile_fun + compile_fun('cxx', '${CXX} -o ${TGT[0]} ${SRC} -I ${SRC[0].parent.bldpath()}') + + def build(bld): + bld(source='wscript', rule='echo "foo\\${SRC[0].name}\\bar"') + + The env variables (CXX, ..) on the task must not hold dicts so as to preserve a consistent order. + The reserved keywords ``SRC`` and ``TGT`` represent the task input and output nodes. + + """ + if isinstance(line, str): + if line.find('<') > 0 or line.find('>') > 0 or line.find('&&') > 0: + shell = True + else: + dvars_lst = [] + funs_lst = [] + for x in line: + if isinstance(x, str): + fun, dvars = compile_fun(x, shell) + dvars_lst += dvars + funs_lst.append(fun) + else: + # assume a function to let through + funs_lst.append(x) + def composed_fun(task): + for x in funs_lst: + ret = x(task) + if ret: + return ret + return None + return composed_fun, dvars_lst + if shell: + return compile_fun_shell(line) + else: + return compile_fun_noshell(line) + +def compile_sig_vars(vars): + """ + This method produces a sig_vars method suitable for subclasses that provide + scriptlet code in their run_str code. + If no such method can be created, this method returns None. + + The purpose of the sig_vars method returned is to ensure + that rebuilds occur whenever the contents of the expression change. + This is case B below:: + + import time + # case A: regular variables + tg = bld(rule='echo ${FOO}') + tg.env.FOO = '%s' % time.time() + # case B + bld(rule='echo ${gen.foo}', foo='%s' % time.time()) + + :param vars: env variables such as CXXFLAGS or gen.foo + :type vars: list of string + :return: A sig_vars method relevant for dependencies if adequate, else None + :rtype: A function, or None in most cases + """ + buf = [] + for x in sorted(vars): + if x[:3] in ('tsk', 'gen', 'bld'): + buf.append('buf.append(%s)' % x) + if buf: + return funex(COMPILE_TEMPLATE_SIG_VARS % '\n\t'.join(buf)) + return None + +def task_factory(name, func=None, vars=None, color='GREEN', ext_in=[], ext_out=[], before=[], after=[], shell=False, scan=None): + """ + Returns a new task subclass with the function ``run`` compiled from the line given.
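# Illustrative sketch, not part of the patch: creating a one-off task class from
# a command line ('checksum' and MD5SUM are made-up names).
from waflib import Task

checksum = Task.task_factory('checksum', '${MD5SUM} ${SRC} > ${TGT}', color='YELLOW', shell=True)
# Equivalent to subclassing Task.Task with run_str set; the class is also
# registered in Task.classes['checksum'] for create_task() lookups.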
+ + :param func: method run + :type func: string or function + :param vars: list of variables to hash + :type vars: list of string + :param color: color to use + :type color: string + :param shell: when *func* is a string, enable/disable the use of the shell + :type shell: bool + :param scan: method scan + :type scan: function + :rtype: :py:class:`waflib.Task.Task` + """ + + params = { + 'vars': vars or [], # function arguments are static, and this one may be modified by the class + 'color': color, + 'name': name, + 'shell': shell, + 'scan': scan, + } + + if isinstance(func, str) or isinstance(func, tuple): + params['run_str'] = func + else: + params['run'] = func + + cls = type(Task)(name, (Task,), params) + classes[name] = cls + + if ext_in: + cls.ext_in = Utils.to_list(ext_in) + if ext_out: + cls.ext_out = Utils.to_list(ext_out) + if before: + cls.before = Utils.to_list(before) + if after: + cls.after = Utils.to_list(after) + + return cls + +def deep_inputs(cls): + """ + Task class decorator to enable rebuilds on input files task signatures + """ + def sig_explicit_deps(self): + Task.sig_explicit_deps(self) + Task.sig_deep_inputs(self) + cls.sig_explicit_deps = sig_explicit_deps + return cls + +TaskBase = Task +"Provided for compatibility reasons, TaskBase should not be used" + +class TaskSemaphore(object): + """ + Task semaphores provide a simple and efficient way of throttling the amount of + a particular task to run concurrently. The throttling value is capped + by the amount of maximum jobs, so for example, a `TaskSemaphore(10)` + has no effect in a `-j2` build. + + Task semaphores are typically specified on the task class level:: + + class compile(waflib.Task.Task): + semaphore = waflib.Task.TaskSemaphore(2) + run_str = 'touch ${TGT}' + + Task semaphores are meant to be used by the build scheduler in the main + thread, so there are no guarantees of thread safety. + """ + def __init__(self, num): + """ + :param num: maximum value of concurrent tasks + :type num: int + """ + self.num = num + self.locking = set() + self.waiting = set() + + def is_locked(self): + """Returns True if this semaphore cannot be acquired by more tasks""" + return len(self.locking) >= self.num + + def acquire(self, tsk): + """ + Mark the semaphore as used by the given task (not re-entrant). + + :param tsk: task object + :type tsk: :py:class:`waflib.Task.Task` + :raises: :py:class:`IndexError` in case the resource is already acquired + """ + if self.is_locked(): + raise IndexError('Cannot lock more %r' % self.locking) + self.locking.add(tsk) + + def release(self, tsk): + """ + Mark the semaphore as unused by the given task. + + :param tsk: task object + :type tsk: :py:class:`waflib.Task.Task` + :raises: :py:class:`KeyError` in case the resource is not acquired by the task + """ + self.locking.remove(tsk) + diff --git a/third_party/waf/waflib/TaskGen.py b/third_party/waf/waflib/TaskGen.py new file mode 100644 index 0000000..32468f0 --- /dev/null +++ b/third_party/waf/waflib/TaskGen.py @@ -0,0 +1,913 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Task generators + +The class :py:class:`waflib.TaskGen.task_gen` encapsulates the creation of task objects (low-level code) +The instances can have various parameters, but the creation of task nodes (Task.py) +is deferred. 
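# Illustrative sketch, not part of the patch: opting a task class into rebuilds
# on input task signatures with the deep_inputs decorator above.
from waflib import Task

@Task.deep_inputs
class run_test(Task.Task):
	run_str = '${SRC}'	# e.g. execute a freshly linked test binary

# run_test now re-runs when the tasks producing its inputs change, not only
# when the input file hashes change.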
To achieve this, various methods are called from the method "apply" +""" + +import copy, re, os, functools +from waflib import Task, Utils, Logs, Errors, ConfigSet, Node + +feats = Utils.defaultdict(set) +"""remember the methods declaring features""" + +HEADER_EXTS = ['.h', '.hpp', '.hxx', '.hh'] + +class task_gen(object): + """ + Instances of this class create :py:class:`waflib.Task.Task` when + calling the method :py:meth:`waflib.TaskGen.task_gen.post` from the main thread. + A few notes: + + * The methods to call (*self.meths*) can be specified dynamically (removing, adding, ..) + * The 'features' are used to add methods to self.meths and then execute them + * The attribute 'path' is a node representing the location of the task generator + * The tasks created are added to the attribute *tasks* + * The attribute 'idx' is a counter of task generators in the same path + """ + + mappings = Utils.ordered_iter_dict() + """Mappings are global file extension mappings that are retrieved in the order of definition""" + + prec = Utils.defaultdict(set) + """Dict that holds the precedence execution rules for task generator methods""" + + def __init__(self, *k, **kw): + """ + Task generator objects predefine various attributes (source, target) for possible + processing by process_rule (make-like rules) or process_source (extensions, misc methods) + + Tasks are stored on the attribute 'tasks'. They are created by calling methods + listed in ``self.meths`` or referenced in the attribute ``features`` + A topological sort is performed to execute the methods in correct order. + + The extra key/value elements passed in ``kw`` are set as attributes + """ + self.source = [] + self.target = '' + + self.meths = [] + """ + List of method names to execute (internal) + """ + + self.features = [] + """ + List of feature names for bringing new methods in + """ + + self.tasks = [] + """ + Tasks created are added to this list + """ + + if not 'bld' in kw: + # task generators without a build context :-/ + self.env = ConfigSet.ConfigSet() + self.idx = 0 + self.path = None + else: + self.bld = kw['bld'] + self.env = self.bld.env.derive() + self.path = kw.get('path', self.bld.path) # by default, emulate chdir when reading scripts + + # Provide a unique index per folder + # This is part of a measure to prevent output file name collisions + path = self.path.abspath() + try: + self.idx = self.bld.idx[path] = self.bld.idx.get(path, 0) + 1 + except AttributeError: + self.bld.idx = {} + self.idx = self.bld.idx[path] = 1 + + # Record the global task generator count + try: + self.tg_idx_count = self.bld.tg_idx_count = self.bld.tg_idx_count + 1 + except AttributeError: + self.tg_idx_count = self.bld.tg_idx_count = 1 + + for key, val in kw.items(): + setattr(self, key, val) + + def __str__(self): + """Debugging helper""" + return "<task_gen %r declared in %s>" % (self.name, self.path.abspath()) + + def __repr__(self): + """Debugging helper""" + lst = [] + for x in self.__dict__: + if x not in ('env', 'bld', 'compiled_tasks', 'tasks'): + lst.append("%s=%s" % (x, repr(getattr(self, x)))) + return "bld(%s) in %s" % (", ".join(lst), self.path.abspath()) + + def get_cwd(self): + """ + Current working directory for the task generator, defaults to the build directory. + This is still used in a few places but it should disappear at some point as the classes + define their own working directory. 
+ + :rtype: :py:class:`waflib.Node.Node` + """ + return self.bld.bldnode + + def get_name(self): + """ + If the attribute ``name`` is not set on the instance, + the name is computed from the target name:: + + def build(bld): + x = bld(name='foo') + x.get_name() # foo + y = bld(target='bar') + y.get_name() # bar + + :rtype: string + :return: name of this task generator + """ + try: + return self._name + except AttributeError: + if isinstance(self.target, list): + lst = [str(x) for x in self.target] + name = self._name = ','.join(lst) + else: + name = self._name = str(self.target) + return name + def set_name(self, name): + self._name = name + + name = property(get_name, set_name) + + def to_list(self, val): + """ + Ensures that a parameter is a list, see :py:func:`waflib.Utils.to_list` + + :type val: string or list of string + :param val: input to return as a list + :rtype: list + """ + if isinstance(val, str): + return val.split() + else: + return val + + def post(self): + """ + Creates tasks for this task generators. The following operations are performed: + + #. The body of this method is called only once and sets the attribute ``posted`` + #. The attribute ``features`` is used to add more methods in ``self.meths`` + #. The methods are sorted by the precedence table ``self.prec`` or `:waflib:attr:waflib.TaskGen.task_gen.prec` + #. The methods are then executed in order + #. The tasks created are added to :py:attr:`waflib.TaskGen.task_gen.tasks` + """ + if getattr(self, 'posted', None): + return False + self.posted = True + + keys = set(self.meths) + keys.update(feats['*']) + + # add the methods listed in the features + self.features = Utils.to_list(self.features) + for x in self.features: + st = feats[x] + if st: + keys.update(st) + elif not x in Task.classes: + Logs.warn('feature %r does not exist - bind at least one method to it?', x) + + # copy the precedence table + prec = {} + prec_tbl = self.prec + for x in prec_tbl: + if x in keys: + prec[x] = prec_tbl[x] + + # elements disconnected + tmp = [] + for a in keys: + for x in prec.values(): + if a in x: + break + else: + tmp.append(a) + + tmp.sort(reverse=True) + + # topological sort + out = [] + while tmp: + e = tmp.pop() + if e in keys: + out.append(e) + try: + nlst = prec[e] + except KeyError: + pass + else: + del prec[e] + for x in nlst: + for y in prec: + if x in prec[y]: + break + else: + tmp.append(x) + tmp.sort(reverse=True) + + if prec: + buf = ['Cycle detected in the method execution:'] + for k, v in prec.items(): + buf.append('- %s after %s' % (k, [x for x in v if x in prec])) + raise Errors.WafError('\n'.join(buf)) + self.meths = out + + # then we run the methods in order + Logs.debug('task_gen: posting %s %d', self, id(self)) + for x in out: + try: + v = getattr(self, x) + except AttributeError: + raise Errors.WafError('%r is not a valid task generator method' % x) + Logs.debug('task_gen: -> %s (%d)', x, id(self)) + v() + + Logs.debug('task_gen: posted %s', self.name) + return True + + def get_hook(self, node): + """ + Returns the ``@extension`` method to call for a Node of a particular extension. 
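# Illustrative sketch, not part of the patch: the mappings consulted by get_hook()
# are normally registered with the @extension decorator defined later in this
# module ('.frob' and the 'frob2c' task class are made up).
from waflib.TaskGen import extension

@extension('.frob')
def process_frob(self, node):
	tsk = self.create_task('frob2c', node, node.change_ext('.c'))
	self.source.append(tsk.outputs[0])	# feed the generated .c back into the build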
+ + :param node: Input file to process + :type node: :py:class:`waflib.Tools.Node.Node` + :return: A method able to process the input node by looking at the extension + :rtype: function + """ + name = node.name + for k in self.mappings: + try: + if name.endswith(k): + return self.mappings[k] + except TypeError: + # regexps objects + if k.match(name): + return self.mappings[k] + keys = list(self.mappings.keys()) + raise Errors.WafError("File %r has no mapping in %r (load a waf tool?)" % (node, keys)) + + def create_task(self, name, src=None, tgt=None, **kw): + """ + Creates task instances. + + :param name: task class name + :type name: string + :param src: input nodes + :type src: list of :py:class:`waflib.Tools.Node.Node` + :param tgt: output nodes + :type tgt: list of :py:class:`waflib.Tools.Node.Node` + :return: A task object + :rtype: :py:class:`waflib.Task.Task` + """ + task = Task.classes[name](env=self.env.derive(), generator=self) + if src: + task.set_inputs(src) + if tgt: + task.set_outputs(tgt) + task.__dict__.update(kw) + self.tasks.append(task) + return task + + def clone(self, env): + """ + Makes a copy of a task generator. Once the copy is made, it is necessary to ensure that the + it does not create the same output files as the original, or the same files may + be compiled several times. + + :param env: A configuration set + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + :return: A copy + :rtype: :py:class:`waflib.TaskGen.task_gen` + """ + newobj = self.bld() + for x in self.__dict__: + if x in ('env', 'bld'): + continue + elif x in ('path', 'features'): + setattr(newobj, x, getattr(self, x)) + else: + setattr(newobj, x, copy.copy(getattr(self, x))) + + newobj.posted = False + if isinstance(env, str): + newobj.env = self.bld.all_envs[env].derive() + else: + newobj.env = env.derive() + + return newobj + +def declare_chain(name='', rule=None, reentrant=None, color='BLUE', + ext_in=[], ext_out=[], before=[], after=[], decider=None, scan=None, install_path=None, shell=False): + """ + Creates a new mapping and a task class for processing files by extension. + See Tools/flex.py for an example. 
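For reference, a typical ``declare_chain`` call (modelled loosely on the flex example mentioned above; the command line and extensions are illustrative)::

	from waflib.TaskGen import declare_chain

	declare_chain(name='flex',
		rule='${FLEX} -o${TGT} ${SRC}',
		ext_in='.l', ext_out='.c',
		reentrant=1)  # re-inject the first output into self.source for further processing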
+ + :param name: name for the task class + :type name: string + :param rule: function to execute or string to be compiled in a function + :type rule: string or function + :param reentrant: re-inject the output file in the process (done automatically, set to 0 to disable) + :type reentrant: int + :param color: color for the task output + :type color: string + :param ext_in: execute the task only after the files of such extensions are created + :type ext_in: list of string + :param ext_out: execute the task only before files of such extensions are processed + :type ext_out: list of string + :param before: execute instances of this task before classes of the given names + :type before: list of string + :param after: execute instances of this task after classes of the given names + :type after: list of string + :param decider: if present, function that returns a list of output file extensions (overrides ext_out for output files, but not for the build order) + :type decider: function + :param scan: scanner function for the task + :type scan: function + :param install_path: installation path for the output nodes + :type install_path: string + """ + ext_in = Utils.to_list(ext_in) + ext_out = Utils.to_list(ext_out) + if not name: + name = rule + cls = Task.task_factory(name, rule, color=color, ext_in=ext_in, ext_out=ext_out, before=before, after=after, scan=scan, shell=shell) + + def x_file(self, node): + if ext_in: + _ext_in = ext_in[0] + + tsk = self.create_task(name, node) + cnt = 0 + + ext = decider(self, node) if decider else cls.ext_out + for x in ext: + k = node.change_ext(x, ext_in=_ext_in) + tsk.outputs.append(k) + + if reentrant != None: + if cnt < int(reentrant): + self.source.append(k) + else: + # reinject downstream files into the build + for y in self.mappings: # ~ nfile * nextensions :-/ + if k.name.endswith(y): + self.source.append(k) + break + cnt += 1 + + if install_path: + self.install_task = self.add_install_files(install_to=install_path, install_from=tsk.outputs) + return tsk + + for x in cls.ext_in: + task_gen.mappings[x] = x_file + return x_file + +def taskgen_method(func): + """ + Decorator that registers method as a task generator method. 
The function must accept a task generator as first parameter::
+
+		from waflib.TaskGen import taskgen_method
+		@taskgen_method
+		def mymethod(self):
+			pass
+
+	:param func: task generator method to add
+	:type func: function
+	:rtype: function
+	"""
+	setattr(task_gen, func.__name__, func)
+	return func
+
+def feature(*k):
+	"""
+	Decorator that registers a task generator method that will be executed when the
+	object attribute ``feature`` contains the corresponding key(s)::
+
+		from waflib.TaskGen import feature
+		@feature('myfeature')
+		def myfunction(self):
+			print('that is my feature!')
+		def build(bld):
+			bld(features='myfeature')
+
+	:param k: feature names
+	:type k: list of string
+	"""
+	def deco(func):
+		setattr(task_gen, func.__name__, func)
+		for name in k:
+			feats[name].update([func.__name__])
+		return func
+	return deco
+
+def before_method(*k):
+	"""
+	Decorator that registers a task generator method which will be executed
+	before the functions of given name(s)::
+
+		from waflib.TaskGen import feature, before_method
+		@feature('myfeature')
+		@before_method('fun2')
+		def fun1(self):
+			print('feature 1!')
+		@feature('myfeature')
+		def fun2(self):
+			print('feature 2!')
+		def build(bld):
+			bld(features='myfeature')
+
+	:param k: method names
+	:type k: list of string
+	"""
+	def deco(func):
+		setattr(task_gen, func.__name__, func)
+		for fun_name in k:
+			task_gen.prec[func.__name__].add(fun_name)
+		return func
+	return deco
+before = before_method
+
+def after_method(*k):
+	"""
+	Decorator that registers a task generator method which will be executed
+	after the functions of given name(s)::
+
+		from waflib.TaskGen import feature, after_method
+		@feature('myfeature')
+		@after_method('fun2')
+		def fun1(self):
+			print('feature 1!')
+		@feature('myfeature')
+		def fun2(self):
+			print('feature 2!')
+		def build(bld):
+			bld(features='myfeature')
+
+	:param k: method names
+	:type k: list of string
+	"""
+	def deco(func):
+		setattr(task_gen, func.__name__, func)
+		for fun_name in k:
+			task_gen.prec[fun_name].add(func.__name__)
+		return func
+	return deco
+after = after_method
+
+def extension(*k):
+	"""
+	Decorator that registers a task generator method which will be invoked during
+	the processing of source files for the extension given::
+
+		from waflib import Task
+		class mytask(Task.Task):
+			run_str = 'cp ${SRC} ${TGT}'
+		@extension('.moo')
+		def create_maa_file(self, node):
+			self.create_task('mytask', node, node.change_ext('.maa'))
+		def build(bld):
+			bld(source='foo.moo')
+	"""
+	def deco(func):
+		setattr(task_gen, func.__name__, func)
+		for x in k:
+			task_gen.mappings[x] = func
+		return func
+	return deco
+
+@taskgen_method
+def to_nodes(self, lst, path=None):
+	"""
+	Flatten the input list of string/nodes/lists into a list of nodes.
+
+	It is used by :py:func:`waflib.TaskGen.process_source` and :py:func:`waflib.TaskGen.process_rule`.
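Combining the decorators above, feature membership lands in ``feats`` and ordering in ``task_gen.prec``; a small illustration (requires waflib on sys.path; the names are made up)::

	from waflib.TaskGen import feature, before_method, feats, task_gen

	@feature('demo')
	@before_method('fun2')
	def fun1(self):
		pass

	@feature('demo')
	def fun2(self):
		pass

	assert feats['demo'] == {'fun1', 'fun2'}
	assert 'fun2' in task_gen.prec['fun1']  # fun1 is recorded to run before fun2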
+ It is designed for source files, for folders, see :py:func:`waflib.Tools.ccroot.to_incnodes`: + + :param lst: input list + :type lst: list of string and nodes + :param path: path from which to search the nodes (by default, :py:attr:`waflib.TaskGen.task_gen.path`) + :type path: :py:class:`waflib.Tools.Node.Node` + :rtype: list of :py:class:`waflib.Tools.Node.Node` + """ + tmp = [] + path = path or self.path + find = path.find_resource + + if isinstance(lst, Node.Node): + lst = [lst] + + for x in Utils.to_list(lst): + if isinstance(x, str): + node = find(x) + elif hasattr(x, 'name'): + node = x + else: + tmp.extend(self.to_nodes(x)) + continue + if not node: + raise Errors.WafError('source not found: %r in %r' % (x, self)) + tmp.append(node) + return tmp + +@feature('*') +def process_source(self): + """ + Processes each element in the attribute ``source`` by extension. + + #. The *source* list is converted through :py:meth:`waflib.TaskGen.to_nodes` to a list of :py:class:`waflib.Node.Node` first. + #. File extensions are mapped to methods having the signature: ``def meth(self, node)`` by :py:meth:`waflib.TaskGen.extension` + #. The method is retrieved through :py:meth:`waflib.TaskGen.task_gen.get_hook` + #. When called, the methods may modify self.source to append more source to process + #. The mappings can map an extension or a filename (see the code below) + """ + self.source = self.to_nodes(getattr(self, 'source', [])) + for node in self.source: + self.get_hook(node)(self, node) + +@feature('*') +@before_method('process_source') +def process_rule(self): + """ + Processes the attribute ``rule``. When present, :py:meth:`waflib.TaskGen.process_source` is disabled:: + + def build(bld): + bld(rule='cp ${SRC} ${TGT}', source='wscript', target='bar.txt') + + Main attributes processed: + + * rule: command to execute, it can be a tuple of strings for multiple commands + * chmod: permissions for the resulting files (integer value such as Utils.O755) + * shell: set to False to execute the command directly (default is True to use a shell) + * scan: scanner function + * vars: list of variables to trigger rebuilds, such as CFLAGS + * cls_str: string to display when executing the task + * cls_keyword: label to display when executing the task + * cache_rule: by default, try to re-use similar classes, set to False to disable + * source: list of Node or string objects representing the source files required by this task + * target: list of Node or string objects representing the files that this task creates + * cwd: current working directory (Node or string) + * stdout: standard output, set to None to prevent waf from capturing the text + * stderr: standard error, set to None to prevent waf from capturing the text + * timeout: timeout for command execution (Python 3) + * always: whether to always run the command (False by default) + * deep_inputs: whether the task must depend on the input file tasks too (False by default) + """ + if not getattr(self, 'rule', None): + return + + # create the task class + name = str(getattr(self, 'name', None) or self.target or getattr(self.rule, '__name__', self.rule)) + + # or we can put the class in a cache for performance reasons + try: + cache = self.bld.cache_rule_attr + except AttributeError: + cache = self.bld.cache_rule_attr = {} + + chmod = getattr(self, 'chmod', None) + shell = getattr(self, 'shell', True) + color = getattr(self, 'color', 'BLUE') + scan = getattr(self, 'scan', None) + _vars = getattr(self, 'vars', []) + cls_str = getattr(self, 'cls_str', None) + 
cls_keyword = getattr(self, 'cls_keyword', None) + use_cache = getattr(self, 'cache_rule', 'True') + deep_inputs = getattr(self, 'deep_inputs', False) + + scan_val = has_deps = hasattr(self, 'deps') + if scan: + scan_val = id(scan) + + key = Utils.h_list((name, self.rule, chmod, shell, color, cls_str, cls_keyword, scan_val, _vars, deep_inputs)) + + cls = None + if use_cache: + try: + cls = cache[key] + except KeyError: + pass + if not cls: + rule = self.rule + if chmod is not None: + def chmod_fun(tsk): + for x in tsk.outputs: + os.chmod(x.abspath(), tsk.generator.chmod) + if isinstance(rule, tuple): + rule = list(rule) + rule.append(chmod_fun) + rule = tuple(rule) + else: + rule = (rule, chmod_fun) + + cls = Task.task_factory(name, rule, _vars, shell=shell, color=color) + + if cls_str: + setattr(cls, '__str__', self.cls_str) + + if cls_keyword: + setattr(cls, 'keyword', self.cls_keyword) + + if deep_inputs: + Task.deep_inputs(cls) + + if scan: + cls.scan = self.scan + elif has_deps: + def scan(self): + deps = getattr(self.generator, 'deps', None) + nodes = self.generator.to_nodes(deps) + return [nodes, []] + cls.scan = scan + + if use_cache: + cache[key] = cls + + # now create one instance + tsk = self.create_task(name) + + for x in ('after', 'before', 'ext_in', 'ext_out'): + setattr(tsk, x, getattr(self, x, [])) + + if hasattr(self, 'stdout'): + tsk.stdout = self.stdout + + if hasattr(self, 'stderr'): + tsk.stderr = self.stderr + + if getattr(self, 'timeout', None): + tsk.timeout = self.timeout + + if getattr(self, 'always', None): + tsk.always_run = True + + if getattr(self, 'target', None): + if isinstance(self.target, str): + self.target = self.target.split() + if not isinstance(self.target, list): + self.target = [self.target] + for x in self.target: + if isinstance(x, str): + tsk.outputs.append(self.path.find_or_declare(x)) + else: + x.parent.mkdir() # if a node was given, create the required folders + tsk.outputs.append(x) + if getattr(self, 'install_path', None): + self.install_task = self.add_install_files(install_to=self.install_path, + install_from=tsk.outputs, chmod=getattr(self, 'chmod', Utils.O644)) + + if getattr(self, 'source', None): + tsk.inputs = self.to_nodes(self.source) + # bypass the execution of process_source by setting the source to an empty list + self.source = [] + + if getattr(self, 'cwd', None): + tsk.cwd = self.cwd + + if isinstance(tsk.run, functools.partial): + # Python documentation says: "partial objects defined in classes + # behave like static methods and do not transform into bound + # methods during instance attribute look-up." + tsk.run = functools.partial(tsk.run, tsk) + +@feature('seq') +def sequence_order(self): + """ + Adds a strict sequential constraint between the tasks generated by task generators. + It works because task generators are posted in order. + It will not post objects which belong to other folders. + + Example:: + + bld(features='javac seq') + bld(features='jar seq') + + To start a new sequence, set the attribute seq_start, for example:: + + obj = bld(features='seq') + obj.seq_start = True + + Note that the method is executed in last position. This is more an + example than a widely-used solution. 
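Returning to ``process_rule`` above, a usage sketch touching the attributes it processes (file names invented)::

	def build(bld):
		bld(rule='cat ${SRC} > ${TGT}',
			source='hdr.txt body.txt',
			target='page.txt',
			shell=True,           # the default; run the command through a shell
			deps=['footer.tpl'],  # extra inputs, picked up by the fallback scanner
			chmod=0o644)          # outputs are chmod-ed by the generated wrapper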
+ """ + if self.meths and self.meths[-1] != 'sequence_order': + self.meths.append('sequence_order') + return + + if getattr(self, 'seq_start', None): + return + + # all the tasks previously declared must be run before these + if getattr(self.bld, 'prev', None): + self.bld.prev.post() + for x in self.bld.prev.tasks: + for y in self.tasks: + y.set_run_after(x) + + self.bld.prev = self + + +re_m4 = re.compile(r'@(\w+)@', re.M) + +class subst_pc(Task.Task): + """ + Creates *.pc* files from *.pc.in*. The task is executed whenever an input variable used + in the substitution changes. + """ + + def force_permissions(self): + "Private for the time being, we will probably refactor this into run_str=[run1,chmod]" + if getattr(self.generator, 'chmod', None): + for x in self.outputs: + os.chmod(x.abspath(), self.generator.chmod) + + def run(self): + "Substitutes variables in a .in file" + + if getattr(self.generator, 'is_copy', None): + for i, x in enumerate(self.outputs): + x.write(self.inputs[i].read('rb'), 'wb') + stat = os.stat(self.inputs[i].abspath()) # Preserve mtime of the copy + os.utime(self.outputs[i].abspath(), (stat.st_atime, stat.st_mtime)) + self.force_permissions() + return None + + if getattr(self.generator, 'fun', None): + ret = self.generator.fun(self) + if not ret: + self.force_permissions() + return ret + + code = self.inputs[0].read(encoding=getattr(self.generator, 'encoding', 'latin-1')) + if getattr(self.generator, 'subst_fun', None): + code = self.generator.subst_fun(self, code) + if code is not None: + self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1')) + self.force_permissions() + return None + + # replace all % by %% to prevent errors by % signs + code = code.replace('%', '%%') + + # extract the vars foo into lst and replace @foo@ by %(foo)s + lst = [] + def repl(match): + g = match.group + if g(1): + lst.append(g(1)) + return "%%(%s)s" % g(1) + return '' + code = getattr(self.generator, 're_m4', re_m4).sub(repl, code) + + try: + d = self.generator.dct + except AttributeError: + d = {} + for x in lst: + tmp = getattr(self.generator, x, '') or self.env[x] or self.env[x.upper()] + try: + tmp = ''.join(tmp) + except TypeError: + tmp = str(tmp) + d[x] = tmp + + code = code % d + self.outputs[0].write(code, encoding=getattr(self.generator, 'encoding', 'latin-1')) + self.generator.bld.raw_deps[self.uid()] = lst + + # make sure the signature is updated + try: + delattr(self, 'cache_sig') + except AttributeError: + pass + + self.force_permissions() + + def sig_vars(self): + """ + Compute a hash (signature) of the variables used in the substitution + """ + bld = self.generator.bld + env = self.env + upd = self.m.update + + if getattr(self.generator, 'fun', None): + upd(Utils.h_fun(self.generator.fun).encode()) + if getattr(self.generator, 'subst_fun', None): + upd(Utils.h_fun(self.generator.subst_fun).encode()) + + # raw_deps: persistent custom values returned by the scanner + vars = self.generator.bld.raw_deps.get(self.uid(), []) + + # hash both env vars and task generator attributes + act_sig = bld.hash_env_vars(env, vars) + upd(act_sig) + + lst = [getattr(self.generator, x, '') for x in vars] + upd(Utils.h_list(lst)) + + return self.m.digest() + +@extension('.pc.in') +def add_pcfile(self, node): + """ + Processes *.pc.in* files to *.pc*. 
Installs the results to ``${PREFIX}/lib/pkgconfig/`` by default + + def build(bld): + bld(source='foo.pc.in', install_path='${LIBDIR}/pkgconfig/') + """ + tsk = self.create_task('subst_pc', node, node.change_ext('.pc', '.pc.in')) + self.install_task = self.add_install_files( + install_to=getattr(self, 'install_path', '${LIBDIR}/pkgconfig/'), install_from=tsk.outputs) + +class subst(subst_pc): + pass + +@feature('subst') +@before_method('process_source', 'process_rule') +def process_subst(self): + """ + Defines a transformation that substitutes the contents of *source* files to *target* files:: + + def build(bld): + bld( + features='subst', + source='foo.c.in', + target='foo.c', + install_path='${LIBDIR}/pkgconfig', + VAR = 'val' + ) + + The input files are supposed to contain macros of the form *@VAR@*, where *VAR* is an argument + of the task generator object. + + This method overrides the processing by :py:meth:`waflib.TaskGen.process_source`. + """ + + src = Utils.to_list(getattr(self, 'source', [])) + if isinstance(src, Node.Node): + src = [src] + tgt = Utils.to_list(getattr(self, 'target', [])) + if isinstance(tgt, Node.Node): + tgt = [tgt] + if len(src) != len(tgt): + raise Errors.WafError('invalid number of source/target for %r' % self) + + for x, y in zip(src, tgt): + if not x or not y: + raise Errors.WafError('null source or target for %r' % self) + a, b = None, None + + if isinstance(x, str) and isinstance(y, str) and x == y: + a = self.path.find_node(x) + b = self.path.get_bld().make_node(y) + if not os.path.isfile(b.abspath()): + b.parent.mkdir() + else: + if isinstance(x, str): + a = self.path.find_resource(x) + elif isinstance(x, Node.Node): + a = x + if isinstance(y, str): + b = self.path.find_or_declare(y) + elif isinstance(y, Node.Node): + b = y + + if not a: + raise Errors.WafError('could not find %r for %r' % (x, self)) + + tsk = self.create_task('subst', a, b) + for k in ('after', 'before', 'ext_in', 'ext_out'): + val = getattr(self, k, None) + if val: + setattr(tsk, k, val) + + # paranoid safety measure for the general case foo.in->foo.h with ambiguous dependencies + for xt in HEADER_EXTS: + if b.name.endswith(xt): + tsk.ext_out = tsk.ext_out + ['.h'] + break + + inst_to = getattr(self, 'install_path', None) + if inst_to: + self.install_task = self.add_install_files(install_to=inst_to, + install_from=b, chmod=getattr(self, 'chmod', Utils.O644)) + + self.source = [] + diff --git a/third_party/waf/waflib/Tools/__init__.py b/third_party/waf/waflib/Tools/__init__.py new file mode 100644 index 0000000..079df35 --- /dev/null +++ b/third_party/waf/waflib/Tools/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) diff --git a/third_party/waf/waflib/Tools/ar.py b/third_party/waf/waflib/Tools/ar.py new file mode 100644 index 0000000..b39b645 --- /dev/null +++ b/third_party/waf/waflib/Tools/ar.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Ralf Habacker, 2006 (rh) + +""" +The **ar** program creates static libraries. This tool is almost always loaded +from others (C, C++, D, etc) for static library support. 
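The C/C++ compiler tools invoke ``find_ar`` below during configuration; the flags can still be adjusted afterwards (the 'rcD' value is an optional binutils-specific tweak, shown here as an assumption)::

	def configure(conf):
		conf.load('gcc')            # gcc's configure calls find_ar()
		conf.env.ARFLAGS = ['rcD']  # e.g. deterministic archives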
+""" + +from waflib.Configure import conf + +@conf +def find_ar(conf): + """Configuration helper used by C/C++ tools to enable the support for static libraries""" + conf.load('ar') + +def configure(conf): + """Finds the ar program and sets the default flags in ``conf.env.ARFLAGS``""" + conf.find_program('ar', var='AR') + conf.add_os_flags('ARFLAGS') + if not conf.env.ARFLAGS: + conf.env.ARFLAGS = ['rcs'] + diff --git a/third_party/waf/waflib/Tools/asm.py b/third_party/waf/waflib/Tools/asm.py new file mode 100644 index 0000000..1d34dda --- /dev/null +++ b/third_party/waf/waflib/Tools/asm.py @@ -0,0 +1,107 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2008-2018 (ita) + +""" +Assembly support, used by tools such as gas and nasm + +To declare targets using assembly:: + + def configure(conf): + conf.load('gcc gas') + + def build(bld): + bld( + features='c cstlib asm', + source = 'test.S', + target = 'asmtest') + + bld( + features='asm asmprogram', + source = 'test.S', + target = 'asmtest') + +Support for pure asm programs and libraries should also work:: + + def configure(conf): + conf.load('nasm') + conf.find_program('ld', 'ASLINK') + + def build(bld): + bld( + features='asm asmprogram', + source = 'test.S', + target = 'asmtest') +""" + +import re +from waflib import Errors, Logs, Task +from waflib.Tools.ccroot import link_task, stlink_task +from waflib.TaskGen import extension +from waflib.Tools import c_preproc + +re_lines = re.compile( + '^[ \t]*(?:%)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef)[ \t]*(.*)\r*$', + re.IGNORECASE | re.MULTILINE) + +class asm_parser(c_preproc.c_parser): + def filter_comments(self, node): + code = node.read() + code = c_preproc.re_nl.sub('', code) + code = c_preproc.re_cpp.sub(c_preproc.repl, code) + return re_lines.findall(code) + +class asm(Task.Task): + """ + Compiles asm files by gas/nasm/yasm/... 
+ """ + color = 'BLUE' + run_str = '${AS} ${ASFLAGS} ${ASMPATH_ST:INCPATHS} ${ASMDEFINES_ST:DEFINES} ${AS_SRC_F}${SRC} ${AS_TGT_F}${TGT}' + + def scan(self): + if self.env.ASM_NAME == 'gas': + return c_preproc.scan(self) + elif self.env.ASM_NAME == 'nasm': + Logs.warn('The Nasm dependency scanner is incomplete!') + + try: + incn = self.generator.includes_nodes + except AttributeError: + raise Errors.WafError('%r is missing the "asm" feature' % self.generator) + + if c_preproc.go_absolute: + nodepaths = incn + else: + nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)] + + tmp = asm_parser(nodepaths) + tmp.start(self.inputs[0], self.env) + return (tmp.nodes, tmp.names) + +@extension('.s', '.S', '.asm', '.ASM', '.spp', '.SPP') +def asm_hook(self, node): + """ + Binds the asm extension to the asm task + + :param node: input file + :type node: :py:class:`waflib.Node.Node` + """ + return self.create_compiled_task('asm', node) + +class asmprogram(link_task): + "Links object files into a c program" + run_str = '${ASLINK} ${ASLINKFLAGS} ${ASLNK_TGT_F}${TGT} ${ASLNK_SRC_F}${SRC}' + ext_out = ['.bin'] + inst_to = '${BINDIR}' + +class asmshlib(asmprogram): + "Links object files into a c shared library" + inst_to = '${LIBDIR}' + +class asmstlib(stlink_task): + "Links object files into a c static library" + pass # do not remove + +def configure(conf): + conf.env.ASMPATH_ST = '-I%s' + conf.env.ASMDEFINES_ST = '-D%s' diff --git a/third_party/waf/waflib/Tools/bison.py b/third_party/waf/waflib/Tools/bison.py new file mode 100644 index 0000000..eef56dc --- /dev/null +++ b/third_party/waf/waflib/Tools/bison.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python +# encoding: utf-8 +# John O'Meara, 2006 +# Thomas Nagy 2009-2018 (ita) + +""" +The **bison** program is a code generator which creates C or C++ files. +The generated files are compiled into object files. +""" + +from waflib import Task +from waflib.TaskGen import extension + +class bison(Task.Task): + """Compiles bison files""" + color = 'BLUE' + run_str = '${BISON} ${BISONFLAGS} ${SRC[0].abspath()} -o ${TGT[0].name}' + ext_out = ['.h'] # just to make sure + +@extension('.y', '.yc', '.yy') +def big_bison(self, node): + """ + Creates a bison task, which must be executed from the directory of the output file. 
+ """ + has_h = '-d' in self.env.BISONFLAGS + + outs = [] + if node.name.endswith('.yc'): + outs.append(node.change_ext('.tab.cc')) + if has_h: + outs.append(node.change_ext('.tab.hh')) + else: + outs.append(node.change_ext('.tab.c')) + if has_h: + outs.append(node.change_ext('.tab.h')) + + tsk = self.create_task('bison', node, outs) + tsk.cwd = node.parent.get_bld() + + # and the c/cxx file must be compiled too + self.source.append(outs[0]) + +def configure(conf): + """ + Detects the *bison* program + """ + conf.find_program('bison', var='BISON') + conf.env.BISONFLAGS = ['-d'] + diff --git a/third_party/waf/waflib/Tools/c.py b/third_party/waf/waflib/Tools/c.py new file mode 100644 index 0000000..effd6b6 --- /dev/null +++ b/third_party/waf/waflib/Tools/c.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) + +"Base for c programs/libraries" + +from waflib import TaskGen, Task +from waflib.Tools import c_preproc +from waflib.Tools.ccroot import link_task, stlink_task + +@TaskGen.extension('.c') +def c_hook(self, node): + "Binds the c file extensions create :py:class:`waflib.Tools.c.c` instances" + if not self.env.CC and self.env.CXX: + return self.create_compiled_task('cxx', node) + return self.create_compiled_task('c', node) + +class c(Task.Task): + "Compiles C files into object files" + run_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CC_SRC_F}${SRC} ${CC_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' + vars = ['CCDEPS'] # unused variable to depend on, just in case + ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] + scan = c_preproc.scan + +class cprogram(link_task): + "Links object files into c programs" + run_str = '${LINK_CC} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}' + ext_out = ['.bin'] + vars = ['LINKDEPS'] + inst_to = '${BINDIR}' + +class cshlib(cprogram): + "Links object files into c shared libraries" + inst_to = '${LIBDIR}' + +class cstlib(stlink_task): + "Links object files into a c static libraries" + pass # do not remove + diff --git a/third_party/waf/waflib/Tools/c_aliases.py b/third_party/waf/waflib/Tools/c_aliases.py new file mode 100644 index 0000000..928cfe2 --- /dev/null +++ b/third_party/waf/waflib/Tools/c_aliases.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2015 (ita) + +"base for all c/c++ programs and libraries" + +from waflib import Utils, Errors +from waflib.Configure import conf + +def get_extensions(lst): + """ + Returns the file extensions for the list of files given as input + + :param lst: files to process + :list lst: list of string or :py:class:`waflib.Node.Node` + :return: list of file extensions + :rtype: list of string + """ + ret = [] + for x in Utils.to_list(lst): + if not isinstance(x, str): + x = x.name + ret.append(x[x.rfind('.') + 1:]) + return ret + +def sniff_features(**kw): + """ + Computes and returns the features required for a task generator by + looking at the file extensions. 
This is aimed at C/C++ mainly::
+
+		sniff_features(source=['foo.c', 'foo.cxx'], typ='shlib')
+		# returns ['cxx', 'c', 'cxxshlib', 'cshlib']
+
+	:param source: source files to process
+	:type source: list of string or :py:class:`waflib.Node.Node`
+	:param typ: object type in *program*, *shlib* or *stlib* (passed as the ``typ`` key)
+	:type typ: string
+	:return: the list of features for a task generator processing the source files
+	:rtype: list of string
+	"""
+	exts = get_extensions(kw.get('source', []))
+	typ = kw['typ']
+	feats = []
+
+	# watch the order, cxx will have the precedence
+	for x in 'cxx cpp c++ cc C'.split():
+		if x in exts:
+			feats.append('cxx')
+			break
+	if 'c' in exts or 'vala' in exts or 'gs' in exts:
+		feats.append('c')
+
+	if 's' in exts or 'S' in exts:
+		feats.append('asm')
+
+	for x in 'f f90 F F90 for FOR'.split():
+		if x in exts:
+			feats.append('fc')
+			break
+
+	if 'd' in exts:
+		feats.append('d')
+
+	if 'java' in exts:
+		feats.append('java')
+		return 'java'
+
+	if typ in ('program', 'shlib', 'stlib'):
+		will_link = False
+		for x in feats:
+			if x in ('cxx', 'd', 'fc', 'c', 'asm'):
+				feats.append(x + typ)
+				will_link = True
+		if not will_link and not kw.get('features', []):
+			raise Errors.WafError('Unable to determine how to link %r, try adding eg: features="c cshlib"?' % kw)
+	return feats
+
+def set_features(kw, typ):
+	"""
+	Inserts data in the input dict *kw* based on existing data and on the type of target
+	required (typ).
+
+	:param kw: task generator parameters
+	:type kw: dict
+	:param typ: type of target
+	:type typ: string
+	"""
+	kw['typ'] = typ
+	kw['features'] = Utils.to_list(kw.get('features', [])) + Utils.to_list(sniff_features(**kw))
+
+@conf
+def program(bld, *k, **kw):
+	"""
+	Alias for creating programs by looking at the file extensions::
+
+		def build(bld):
+			bld.program(source='foo.c', target='app')
+			# equivalent to:
+			# bld(features='c cprogram', source='foo.c', target='app')
+
+	"""
+	set_features(kw, 'program')
+	return bld(*k, **kw)
+
+@conf
+def shlib(bld, *k, **kw):
+	"""
+	Alias for creating shared libraries by looking at the file extensions::
+
+		def build(bld):
+			bld.shlib(source='foo.c', target='app')
+			# equivalent to:
+			# bld(features='c cshlib', source='foo.c', target='app')
+
+	"""
+	set_features(kw, 'shlib')
+	return bld(*k, **kw)
+
+@conf
+def stlib(bld, *k, **kw):
+	"""
+	Alias for creating static libraries by looking at the file extensions::
+
+		def build(bld):
+			bld.stlib(source='foo.cpp', target='app')
+			# equivalent to:
+			# bld(features='cxx cxxstlib', source='foo.cpp', target='app')
+
+	"""
+	set_features(kw, 'stlib')
+	return bld(*k, **kw)
+
+@conf
+def objects(bld, *k, **kw):
+	"""
+	Alias for creating object files by looking at the file extensions::
+
+		def build(bld):
+			bld.objects(source='foo.c', target='app')
+			# equivalent to:
+			# bld(features='c', source='foo.c', target='app')
+
+	"""
+	set_features(kw, 'objects')
+	return bld(*k, **kw)
+
diff --git a/third_party/waf/waflib/Tools/c_config.py b/third_party/waf/waflib/Tools/c_config.py
new file mode 100644
index 0000000..f5ab19b
--- /dev/null
+++ b/third_party/waf/waflib/Tools/c_config.py
@@ -0,0 +1,1370 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2005-2018 (ita)
+
+"""
+C/C++/D configuration helpers
+"""
+
+from __future__ import with_statement
+
+import os, re, shlex
+from waflib import Build, Utils, Task, Options, Logs, Errors, Runner
+from waflib.TaskGen import after_method, feature
+from waflib.Configure import conf
+
+WAF_CONFIG_H = 'config.h'
+"""default name for the
config.h file""" + +DEFKEYS = 'define_key' +INCKEYS = 'include_key' + +SNIP_EMPTY_PROGRAM = ''' +int main(int argc, char **argv) { + (void)argc; (void)argv; + return 0; +} +''' + +MACRO_TO_DESTOS = { +'__linux__' : 'linux', +'__GNU__' : 'gnu', # hurd +'__FreeBSD__' : 'freebsd', +'__NetBSD__' : 'netbsd', +'__OpenBSD__' : 'openbsd', +'__sun' : 'sunos', +'__hpux' : 'hpux', +'__sgi' : 'irix', +'_AIX' : 'aix', +'__CYGWIN__' : 'cygwin', +'__MSYS__' : 'cygwin', +'_UWIN' : 'uwin', +'_WIN64' : 'win32', +'_WIN32' : 'win32', +# Note about darwin: this is also tested with 'defined __APPLE__ && defined __MACH__' somewhere below in this file. +'__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__' : 'darwin', +'__ENVIRONMENT_IPHONE_OS_VERSION_MIN_REQUIRED__' : 'darwin', # iphone +'__QNX__' : 'qnx', +'__native_client__' : 'nacl' # google native client platform +} + +MACRO_TO_DEST_CPU = { +'__x86_64__' : 'x86_64', +'__amd64__' : 'x86_64', +'__i386__' : 'x86', +'__ia64__' : 'ia', +'__mips__' : 'mips', +'__sparc__' : 'sparc', +'__alpha__' : 'alpha', +'__aarch64__' : 'aarch64', +'__thumb__' : 'thumb', +'__arm__' : 'arm', +'__hppa__' : 'hppa', +'__powerpc__' : 'powerpc', +'__ppc__' : 'powerpc', +'__convex__' : 'convex', +'__m68k__' : 'm68k', +'__s390x__' : 's390x', +'__s390__' : 's390', +'__sh__' : 'sh', +'__xtensa__' : 'xtensa', +'__e2k__' : 'e2k', +'__riscv' : 'riscv', +} + +@conf +def parse_flags(self, line, uselib_store, env=None, force_static=False, posix=None): + """ + Parses flags from the input lines, and adds them to the relevant use variables:: + + def configure(conf): + conf.parse_flags('-O3', 'FOO') + # conf.env.CXXFLAGS_FOO = ['-O3'] + # conf.env.CFLAGS_FOO = ['-O3'] + + :param line: flags + :type line: string + :param uselib_store: where to add the flags + :type uselib_store: string + :param env: config set or conf.env by default + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + :param force_static: force usage of static libraries + :type force_static: bool default False + :param posix: usage of POSIX mode for shlex lexical analiysis library + :type posix: bool default True + """ + + assert(isinstance(line, str)) + + env = env or self.env + + # Issue 811 and 1371 + if posix is None: + posix = True + if '\\' in line: + posix = ('\\ ' in line) or ('\\\\' in line) + + lex = shlex.shlex(line, posix=posix) + lex.whitespace_split = True + lex.commenters = '' + lst = list(lex) + + so_re = re.compile(r"\.so(?:\.[0-9]+)*$") + + # append_unique is not always possible + # for example, apple flags may require both -arch i386 and -arch ppc + uselib = uselib_store + def app(var, val): + env.append_value('%s_%s' % (var, uselib), val) + def appu(var, val): + env.append_unique('%s_%s' % (var, uselib), val) + static = False + while lst: + x = lst.pop(0) + st = x[:2] + ot = x[2:] + + if st == '-I' or st == '/I': + if not ot: + ot = lst.pop(0) + appu('INCLUDES', ot) + elif st == '-i': + tmp = [x, lst.pop(0)] + app('CFLAGS', tmp) + app('CXXFLAGS', tmp) + elif st == '-D' or (env.CXX_NAME == 'msvc' and st == '/D'): # not perfect but.. 
+ if not ot: + ot = lst.pop(0) + app('DEFINES', ot) + elif st == '-l': + if not ot: + ot = lst.pop(0) + prefix = 'STLIB' if (force_static or static) else 'LIB' + app(prefix, ot) + elif st == '-L': + if not ot: + ot = lst.pop(0) + prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' + appu(prefix, ot) + elif x.startswith('/LIBPATH:'): + prefix = 'STLIBPATH' if (force_static or static) else 'LIBPATH' + appu(prefix, x.replace('/LIBPATH:', '')) + elif x.startswith('-std='): + prefix = 'CXXFLAGS' if '++' in x else 'CFLAGS' + app(prefix, x) + elif x.startswith('+') or x in ('-pthread', '-fPIC', '-fpic', '-fPIE', '-fpie', '-flto', '-fno-lto'): + app('CFLAGS', x) + app('CXXFLAGS', x) + app('LINKFLAGS', x) + elif x == '-framework': + appu('FRAMEWORK', lst.pop(0)) + elif x.startswith('-F'): + appu('FRAMEWORKPATH', x[2:]) + elif x == '-Wl,-rpath' or x == '-Wl,-R': + app('RPATH', lst.pop(0).lstrip('-Wl,')) + elif x.startswith('-Wl,-R,'): + app('RPATH', x[7:]) + elif x.startswith('-Wl,-R'): + app('RPATH', x[6:]) + elif x.startswith('-Wl,-rpath,'): + app('RPATH', x[11:]) + elif x == '-Wl,-Bstatic' or x == '-Bstatic': + static = True + elif x == '-Wl,-Bdynamic' or x == '-Bdynamic': + static = False + elif x.startswith('-Wl') or x in ('-rdynamic', '-pie'): + app('LINKFLAGS', x) + elif x.startswith(('-m', '-f', '-dynamic', '-O', '-g')): + # Adding the -W option breaks python builds on Openindiana + app('CFLAGS', x) + app('CXXFLAGS', x) + elif x.startswith('-bundle'): + app('LINKFLAGS', x) + elif x.startswith(('-undefined', '-Xlinker')): + arg = lst.pop(0) + app('LINKFLAGS', [x, arg]) + elif x.startswith(('-arch', '-isysroot')): + tmp = [x, lst.pop(0)] + app('CFLAGS', tmp) + app('CXXFLAGS', tmp) + app('LINKFLAGS', tmp) + elif x.endswith(('.a', '.dylib', '.lib')) or so_re.search(x): + appu('LINKFLAGS', x) # not cool, #762 + else: + self.to_log('Unhandled flag %r' % x) + +@conf +def validate_cfg(self, kw): + """ + Searches for the program *pkg-config* if missing, and validates the + parameters to pass to :py:func:`waflib.Tools.c_config.exec_cfg`. 
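Back to the flag dispatch above: it keys on the first two characters of each token. A condensed picture of what ends up in the use variables (paths and values are illustrative)::

	def configure(conf):
		conf.parse_flags('-I/opt/include -DFOO=1 -L/opt/lib -lfoo -Wl,-rpath,/opt/lib', 'FOO')
		# conf.env.INCLUDES_FOO == ['/opt/include']
		# conf.env.DEFINES_FOO  == ['FOO=1']
		# conf.env.LIBPATH_FOO  == ['/opt/lib']
		# conf.env.LIB_FOO      == ['foo']
		# conf.env.RPATH_FOO    == ['/opt/lib']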
+ + :param path: the **-config program to use** (default is *pkg-config*) + :type path: list of string + :param msg: message to display to describe the test executed + :type msg: string + :param okmsg: message to display when the test is successful + :type okmsg: string + :param errmsg: message to display in case of error + :type errmsg: string + """ + if not 'path' in kw: + if not self.env.PKGCONFIG: + self.find_program('pkg-config', var='PKGCONFIG') + kw['path'] = self.env.PKGCONFIG + + # verify that exactly one action is requested + s = ('atleast_pkgconfig_version' in kw) + ('modversion' in kw) + ('package' in kw) + if s != 1: + raise ValueError('exactly one of atleast_pkgconfig_version, modversion and package must be set') + if not 'msg' in kw: + if 'atleast_pkgconfig_version' in kw: + kw['msg'] = 'Checking for pkg-config version >= %r' % kw['atleast_pkgconfig_version'] + elif 'modversion' in kw: + kw['msg'] = 'Checking for %r version' % kw['modversion'] + else: + kw['msg'] = 'Checking for %r' %(kw['package']) + + # let the modversion check set the okmsg to the detected version + if not 'okmsg' in kw and not 'modversion' in kw: + kw['okmsg'] = 'yes' + if not 'errmsg' in kw: + kw['errmsg'] = 'not found' + + # pkg-config version + if 'atleast_pkgconfig_version' in kw: + pass + elif 'modversion' in kw: + if not 'uselib_store' in kw: + kw['uselib_store'] = kw['modversion'] + if not 'define_name' in kw: + kw['define_name'] = '%s_VERSION' % Utils.quote_define_name(kw['uselib_store']) + else: + if not 'uselib_store' in kw: + kw['uselib_store'] = Utils.to_list(kw['package'])[0].upper() + if not 'define_name' in kw: + kw['define_name'] = self.have_define(kw['uselib_store']) + +@conf +def exec_cfg(self, kw): + """ + Executes ``pkg-config`` or other ``-config`` applications to collect configuration flags: + + * if atleast_pkgconfig_version is given, check that pkg-config has the version n and return + * if modversion is given, then return the module version + * else, execute the *-config* program with the *args* and *variables* given, and set the flags on the *conf.env.FLAGS_name* variable + + :param path: the **-config program to use** + :type path: list of string + :param atleast_pkgconfig_version: minimum pkg-config version to use (disable other tests) + :type atleast_pkgconfig_version: string + :param package: package name, for example *gtk+-2.0* + :type package: string + :param uselib_store: if the test is successful, define HAVE\\_*name*. It is also used to define *conf.env.FLAGS_name* variables. 
+ :type uselib_store: string + :param modversion: if provided, return the version of the given module and define *name*\\_VERSION + :type modversion: string + :param args: arguments to give to *package* when retrieving flags + :type args: list of string + :param variables: return the values of particular variables + :type variables: list of string + :param define_variable: additional variables to define (also in conf.env.PKG_CONFIG_DEFINES) + :type define_variable: dict(string: string) + :param pkg_config_path: paths where pkg-config should search for .pc config files (overrides env.PKG_CONFIG_PATH if exists) + :type pkg_config_path: string, list of directories separated by colon + :param force_static: force usage of static libraries + :type force_static: bool default False + :param posix: usage of POSIX mode for shlex lexical analiysis library + :type posix: bool default True + """ + + path = Utils.to_list(kw['path']) + env = self.env.env or None + if kw.get('pkg_config_path'): + if not env: + env = dict(self.environ) + env['PKG_CONFIG_PATH'] = kw['pkg_config_path'] + + def define_it(): + define_name = kw['define_name'] + # by default, add HAVE_X to the config.h, else provide DEFINES_X for use=X + if kw.get('global_define', 1): + self.define(define_name, 1, False) + else: + self.env.append_unique('DEFINES_%s' % kw['uselib_store'], "%s=1" % define_name) + + if kw.get('add_have_to_env', 1): + self.env[define_name] = 1 + + # pkg-config version + if 'atleast_pkgconfig_version' in kw: + cmd = path + ['--atleast-pkgconfig-version=%s' % kw['atleast_pkgconfig_version']] + self.cmd_and_log(cmd, env=env) + return + + # single version for a module + if 'modversion' in kw: + version = self.cmd_and_log(path + ['--modversion', kw['modversion']], env=env).strip() + if not 'okmsg' in kw: + kw['okmsg'] = version + self.define(kw['define_name'], version) + return version + + lst = [] + path + + defi = kw.get('define_variable') + if not defi: + defi = self.env.PKG_CONFIG_DEFINES or {} + for key, val in defi.items(): + lst.append('--define-variable=%s=%s' % (key, val)) + + static = kw.get('force_static', False) + if 'args' in kw: + args = Utils.to_list(kw['args']) + if '--static' in args or '--static-libs' in args: + static = True + lst += args + + # tools like pkgconf expect the package argument after the -- ones -_- + lst.extend(Utils.to_list(kw['package'])) + + # retrieving variables of a module + if 'variables' in kw: + v_env = kw.get('env', self.env) + vars = Utils.to_list(kw['variables']) + for v in vars: + val = self.cmd_and_log(lst + ['--variable=' + v], env=env).strip() + var = '%s_%s' % (kw['uselib_store'], v) + v_env[var] = val + return + + # so we assume the command-line will output flags to be parsed afterwards + ret = self.cmd_and_log(lst, env=env) + + define_it() + self.parse_flags(ret, kw['uselib_store'], kw.get('env', self.env), force_static=static, posix=kw.get('posix')) + return ret + +@conf +def check_cfg(self, *k, **kw): + """ + Checks for configuration flags using a **-config**-like program (pkg-config, sdl-config, etc). 
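Summarizing ``exec_cfg`` above, the issued command lines take roughly these shapes (package names illustrative)::

	# pkg-config --atleast-pkgconfig-version=0.29   (version check only)
	# pkg-config --modversion glib-2.0              (module version, also stored as a define)
	# pkg-config --cflags --libs glib-2.0           ('args' are placed before the package)
	# pkg-config glib-2.0 --variable=prefix         (variable queries are appended last)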
+ This wraps internal calls to :py:func:`waflib.Tools.c_config.validate_cfg` and :py:func:`waflib.Tools.c_config.exec_cfg` + so check exec_cfg parameters descriptions for more details on kw passed + + A few examples:: + + def configure(conf): + conf.load('compiler_c') + conf.check_cfg(package='glib-2.0', args='--libs --cflags') + conf.check_cfg(package='pango') + conf.check_cfg(package='pango', uselib_store='MYPANGO', args=['--cflags', '--libs']) + conf.check_cfg(package='pango', + args=['pango >= 0.1.0', 'pango < 9.9.9', '--cflags', '--libs'], + msg="Checking for 'pango 0.1.0'") + conf.check_cfg(path='sdl-config', args='--cflags --libs', package='', uselib_store='SDL') + conf.check_cfg(path='mpicc', args='--showme:compile --showme:link', + package='', uselib_store='OPEN_MPI', mandatory=False) + # variables + conf.check_cfg(package='gtk+-2.0', variables=['includedir', 'prefix'], uselib_store='FOO') + print(conf.env.FOO_includedir) + """ + self.validate_cfg(kw) + if 'msg' in kw: + self.start_msg(kw['msg'], **kw) + ret = None + try: + ret = self.exec_cfg(kw) + except self.errors.WafError as e: + if 'errmsg' in kw: + self.end_msg(kw['errmsg'], 'YELLOW', **kw) + if Logs.verbose > 1: + self.to_log('Command failure: %s' % e) + self.fatal('The configuration failed') + else: + if not ret: + ret = True + kw['success'] = ret + if 'okmsg' in kw: + self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) + + return ret + +def build_fun(bld): + """ + Build function that is used for running configuration tests with ``conf.check()`` + """ + if bld.kw['compile_filename']: + node = bld.srcnode.make_node(bld.kw['compile_filename']) + node.write(bld.kw['code']) + + o = bld(features=bld.kw['features'], source=bld.kw['compile_filename'], target='testprog') + + for k, v in bld.kw.items(): + setattr(o, k, v) + + if not bld.kw.get('quiet'): + bld.conf.to_log("==>\n%s\n<==" % bld.kw['code']) + +@conf +def validate_c(self, kw): + """ + Pre-checks the parameters that will be given to :py:func:`waflib.Configure.run_build` + + :param compiler: c or cxx (tries to guess what is best) + :type compiler: string + :param type: cprogram, cshlib, cstlib - not required if *features are given directly* + :type type: binary to create + :param feature: desired features for the task generator that will execute the test, for example ``cxx cxxstlib`` + :type feature: list of string + :param fragment: provide a piece of code for the test (default is to let the system create one) + :type fragment: string + :param uselib_store: define variables after the test is executed (IMPORTANT!) 
+ :type uselib_store: string + :param use: parameters to use for building (just like the normal *use* keyword) + :type use: list of string + :param define_name: define to set when the check is over + :type define_name: string + :param execute: execute the resulting binary + :type execute: bool + :param define_ret: if execute is set to True, use the execution output in both the define and the return value + :type define_ret: bool + :param header_name: check for a particular header + :type header_name: string + :param auto_add_header_name: if header_name was set, add the headers in env.INCKEYS so the next tests will include these headers + :type auto_add_header_name: bool + """ + for x in ('type_name', 'field_name', 'function_name'): + if x in kw: + Logs.warn('Invalid argument %r in test' % x) + + if not 'build_fun' in kw: + kw['build_fun'] = build_fun + + if not 'env' in kw: + kw['env'] = self.env.derive() + env = kw['env'] + + if not 'compiler' in kw and not 'features' in kw: + kw['compiler'] = 'c' + if env.CXX_NAME and Task.classes.get('cxx'): + kw['compiler'] = 'cxx' + if not self.env.CXX: + self.fatal('a c++ compiler is required') + else: + if not self.env.CC: + self.fatal('a c compiler is required') + + if not 'compile_mode' in kw: + kw['compile_mode'] = 'c' + if 'cxx' in Utils.to_list(kw.get('features', [])) or kw.get('compiler') == 'cxx': + kw['compile_mode'] = 'cxx' + + if not 'type' in kw: + kw['type'] = 'cprogram' + + if not 'features' in kw: + if not 'header_name' in kw or kw.get('link_header_test', True): + kw['features'] = [kw['compile_mode'], kw['type']] # "c ccprogram" + else: + kw['features'] = [kw['compile_mode']] + else: + kw['features'] = Utils.to_list(kw['features']) + + if not 'compile_filename' in kw: + kw['compile_filename'] = 'test.c' + ((kw['compile_mode'] == 'cxx') and 'pp' or '') + + def to_header(dct): + if 'header_name' in dct: + dct = Utils.to_list(dct['header_name']) + return ''.join(['#include <%s>\n' % x for x in dct]) + return '' + + if 'framework_name' in kw: + # OSX, not sure this is used anywhere + fwkname = kw['framework_name'] + if not 'uselib_store' in kw: + kw['uselib_store'] = fwkname.upper() + if not kw.get('no_header'): + fwk = '%s/%s.h' % (fwkname, fwkname) + if kw.get('remove_dot_h'): + fwk = fwk[:-2] + val = kw.get('header_name', []) + kw['header_name'] = Utils.to_list(val) + [fwk] + kw['msg'] = 'Checking for framework %s' % fwkname + kw['framework'] = fwkname + + elif 'header_name' in kw: + if not 'msg' in kw: + kw['msg'] = 'Checking for header %s' % kw['header_name'] + + l = Utils.to_list(kw['header_name']) + assert len(l), 'list of headers in header_name is empty' + + kw['code'] = to_header(kw) + SNIP_EMPTY_PROGRAM + if not 'uselib_store' in kw: + kw['uselib_store'] = l[0].upper() + if not 'define_name' in kw: + kw['define_name'] = self.have_define(l[0]) + + if 'lib' in kw: + if not 'msg' in kw: + kw['msg'] = 'Checking for library %s' % kw['lib'] + if not 'uselib_store' in kw: + kw['uselib_store'] = kw['lib'].upper() + + if 'stlib' in kw: + if not 'msg' in kw: + kw['msg'] = 'Checking for static library %s' % kw['stlib'] + if not 'uselib_store' in kw: + kw['uselib_store'] = kw['stlib'].upper() + + if 'fragment' in kw: + # an additional code fragment may be provided to replace the predefined code + # in custom headers + kw['code'] = kw['fragment'] + if not 'msg' in kw: + kw['msg'] = 'Checking for code snippet' + if not 'errmsg' in kw: + kw['errmsg'] = 'no' + + for (flagsname,flagstype) in (('cxxflags','compiler'), ('cflags','compiler'), 
('linkflags','linker')): + if flagsname in kw: + if not 'msg' in kw: + kw['msg'] = 'Checking for %s flags %s' % (flagstype, kw[flagsname]) + if not 'errmsg' in kw: + kw['errmsg'] = 'no' + + if not 'execute' in kw: + kw['execute'] = False + if kw['execute']: + kw['features'].append('test_exec') + kw['chmod'] = Utils.O755 + + if not 'errmsg' in kw: + kw['errmsg'] = 'not found' + + if not 'okmsg' in kw: + kw['okmsg'] = 'yes' + + if not 'code' in kw: + kw['code'] = SNIP_EMPTY_PROGRAM + + # if there are headers to append automatically to the next tests + if self.env[INCKEYS]: + kw['code'] = '\n'.join(['#include <%s>' % x for x in self.env[INCKEYS]]) + '\n' + kw['code'] + + # in case defines lead to very long command-lines + if kw.get('merge_config_header') or env.merge_config_header: + kw['code'] = '%s\n\n%s' % (self.get_config_header(), kw['code']) + env.DEFINES = [] # modify the copy + + if not kw.get('success'): + kw['success'] = None + + if 'define_name' in kw: + self.undefine(kw['define_name']) + if not 'msg' in kw: + self.fatal('missing "msg" in conf.check(...)') + +@conf +def post_check(self, *k, **kw): + """ + Sets the variables after a test executed in + :py:func:`waflib.Tools.c_config.check` was run successfully + """ + is_success = 0 + if kw['execute']: + if kw['success'] is not None: + if kw.get('define_ret'): + is_success = kw['success'] + else: + is_success = (kw['success'] == 0) + else: + is_success = (kw['success'] == 0) + + if kw.get('define_name'): + comment = kw.get('comment', '') + define_name = kw['define_name'] + if kw['execute'] and kw.get('define_ret') and isinstance(is_success, str): + if kw.get('global_define', 1): + self.define(define_name, is_success, quote=kw.get('quote', 1), comment=comment) + else: + if kw.get('quote', 1): + succ = '"%s"' % is_success + else: + succ = int(is_success) + val = '%s=%s' % (define_name, succ) + var = 'DEFINES_%s' % kw['uselib_store'] + self.env.append_value(var, val) + else: + if kw.get('global_define', 1): + self.define_cond(define_name, is_success, comment=comment) + else: + var = 'DEFINES_%s' % kw['uselib_store'] + self.env.append_value(var, '%s=%s' % (define_name, int(is_success))) + + # define conf.env.HAVE_X to 1 + if kw.get('add_have_to_env', 1): + if kw.get('uselib_store'): + self.env[self.have_define(kw['uselib_store'])] = 1 + elif kw['execute'] and kw.get('define_ret'): + self.env[define_name] = is_success + else: + self.env[define_name] = int(is_success) + + if 'header_name' in kw: + if kw.get('auto_add_header_name'): + self.env.append_value(INCKEYS, Utils.to_list(kw['header_name'])) + + if is_success and 'uselib_store' in kw: + from waflib.Tools import ccroot + # See get_uselib_vars in ccroot.py + _vars = set() + for x in kw['features']: + if x in ccroot.USELIB_VARS: + _vars |= ccroot.USELIB_VARS[x] + + for k in _vars: + x = k.lower() + if x in kw: + self.env.append_value(k + '_' + kw['uselib_store'], kw[x]) + return is_success + +@conf +def check(self, *k, **kw): + """ + Performs a configuration test by calling :py:func:`waflib.Configure.run_build`. + For the complete list of parameters, see :py:func:`waflib.Tools.c_config.validate_c`. + To force a specific compiler, pass ``compiler='c'`` or ``compiler='cxx'`` to the list of arguments + + Besides build targets, complete builds can be given through a build function. 
All files will + be written to a temporary directory:: + + def build(bld): + lib_node = bld.srcnode.make_node('libdir/liblc1.c') + lib_node.parent.mkdir() + lib_node.write('#include <stdio.h>\\nint lib_func(void) { FILE *f = fopen("foo", "r");}\\n', 'w') + bld(features='c cshlib', source=[lib_node], linkflags=conf.env.EXTRA_LDFLAGS, target='liblc') + conf.check(build_fun=build, msg=msg) + """ + self.validate_c(kw) + self.start_msg(kw['msg'], **kw) + ret = None + try: + ret = self.run_build(*k, **kw) + except self.errors.ConfigurationError: + self.end_msg(kw['errmsg'], 'YELLOW', **kw) + if Logs.verbose > 1: + raise + else: + self.fatal('The configuration failed') + else: + kw['success'] = ret + + ret = self.post_check(*k, **kw) + if not ret: + self.end_msg(kw['errmsg'], 'YELLOW', **kw) + self.fatal('The configuration failed %r' % ret) + else: + self.end_msg(self.ret_msg(kw['okmsg'], kw), **kw) + return ret + +class test_exec(Task.Task): + """ + A task that runs programs after they are built. See :py:func:`waflib.Tools.c_config.test_exec_fun`. + """ + color = 'PINK' + def run(self): + cmd = [self.inputs[0].abspath()] + getattr(self.generator, 'test_args', []) + if getattr(self.generator, 'rpath', None): + if getattr(self.generator, 'define_ret', False): + self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd) + else: + self.generator.bld.retval = self.generator.bld.exec_command(cmd) + else: + env = self.env.env or {} + env.update(dict(os.environ)) + for var in ('LD_LIBRARY_PATH', 'DYLD_LIBRARY_PATH', 'PATH'): + env[var] = self.inputs[0].parent.abspath() + os.path.pathsep + env.get(var, '') + if getattr(self.generator, 'define_ret', False): + self.generator.bld.retval = self.generator.bld.cmd_and_log(cmd, env=env) + else: + self.generator.bld.retval = self.generator.bld.exec_command(cmd, env=env) + +@feature('test_exec') +@after_method('apply_link') +def test_exec_fun(self): + """ + The feature **test_exec** is used to create a task that will to execute the binary + created (link task output) during the build. The exit status will be set + on the build context, so only one program may have the feature *test_exec*. + This is used by configuration tests:: + + def configure(conf): + conf.check(execute=True) + """ + self.create_task('test_exec', self.link_task.outputs[0]) + +@conf +def check_cxx(self, *k, **kw): + """ + Runs a test with a task generator of the form:: + + conf.check(features='cxx cxxprogram', ...) + """ + kw['compiler'] = 'cxx' + return self.check(*k, **kw) + +@conf +def check_cc(self, *k, **kw): + """ + Runs a test with a task generator of the form:: + + conf.check(features='c cprogram', ...) + """ + kw['compiler'] = 'c' + return self.check(*k, **kw) + +@conf +def set_define_comment(self, key, comment): + """ + Sets a comment that will appear in the configuration header + + :type key: string + :type comment: string + """ + coms = self.env.DEFINE_COMMENTS + if not coms: + coms = self.env.DEFINE_COMMENTS = {} + coms[key] = comment or '' + +@conf +def get_define_comment(self, key): + """ + Returns the comment associated to a define + + :type key: string + """ + coms = self.env.DEFINE_COMMENTS or {} + return coms.get(key, '') + +@conf +def define(self, key, val, quote=True, comment=''): + """ + Stores a single define and its state into ``conf.env.DEFINES``. The value is cast to an integer (0/1). 
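Strictly, only True/False/None are coerced to 1/0; other values are stored as given. A sketch of the resulting ``DEFINES`` entries::

	def configure(conf):
		conf.define('VERSION', '1.2')  # DEFINES: ['VERSION="1.2"']
		conf.define('USE_FOO', True)   # True -> 1: [..., 'USE_FOO=1']
		conf.define('VERSION', '2.0')  # replaces the VERSION entry in place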
+ + :param key: define name + :type key: string + :param val: value + :type val: int or string + :param quote: enclose strings in quotes (yes by default) + :type quote: bool + """ + assert isinstance(key, str) + if not key: + return + if val is True: + val = 1 + elif val in (False, None): + val = 0 + + if isinstance(val, int) or isinstance(val, float): + s = '%s=%s' + else: + s = quote and '%s="%s"' or '%s=%s' + app = s % (key, str(val)) + + ban = key + '=' + lst = self.env.DEFINES + for x in lst: + if x.startswith(ban): + lst[lst.index(x)] = app + break + else: + self.env.append_value('DEFINES', app) + + self.env.append_unique(DEFKEYS, key) + self.set_define_comment(key, comment) + +@conf +def undefine(self, key, comment=''): + """ + Removes a global define from ``conf.env.DEFINES`` + + :param key: define name + :type key: string + """ + assert isinstance(key, str) + if not key: + return + ban = key + '=' + lst = [x for x in self.env.DEFINES if not x.startswith(ban)] + self.env.DEFINES = lst + self.env.append_unique(DEFKEYS, key) + self.set_define_comment(key, comment) + +@conf +def define_cond(self, key, val, comment=''): + """ + Conditionally defines a name:: + + def configure(conf): + conf.define_cond('A', True) + # equivalent to: + # if val: conf.define('A', 1) + # else: conf.undefine('A') + + :param key: define name + :type key: string + :param val: value + :type val: int or string + """ + assert isinstance(key, str) + if not key: + return + if val: + self.define(key, 1, comment=comment) + else: + self.undefine(key, comment=comment) + +@conf +def is_defined(self, key): + """ + Indicates whether a particular define is globally set in ``conf.env.DEFINES``. + + :param key: define name + :type key: string + :return: True if the define is set + :rtype: bool + """ + assert key and isinstance(key, str) + + ban = key + '=' + for x in self.env.DEFINES: + if x.startswith(ban): + return True + return False + +@conf +def get_define(self, key): + """ + Returns the value of an existing define, or None if not found + + :param key: define name + :type key: string + :rtype: string + """ + assert key and isinstance(key, str) + + ban = key + '=' + for x in self.env.DEFINES: + if x.startswith(ban): + return x[len(ban):] + return None + +@conf +def have_define(self, key): + """ + Returns a variable suitable for command-line or header use by removing invalid characters + and prefixing it with ``HAVE_`` + + :param key: define name + :type key: string + :return: the input key prefixed by *HAVE_* and substitute any invalid characters. + :rtype: string + """ + return (self.env.HAVE_PAT or 'HAVE_%s') % Utils.quote_define_name(key) + +@conf +def write_config_header(self, configfile='', guard='', top=False, defines=True, headers=False, remove=True, define_prefix=''): + """ + Writes a configuration header containing defines and includes:: + + def configure(cnf): + cnf.define('A', 1) + cnf.write_config_header('config.h') + + This function only adds include guards (if necessary), consult + :py:func:`waflib.Tools.c_config.get_config_header` for details on the body. 
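The accessors above round-trip as follows (sketch)::

	def configure(conf):
		conf.define('ANSWER', 42)
		assert conf.is_defined('ANSWER')
		assert conf.get_define('ANSWER') == '42'  # values come back as strings
		conf.undefine('ANSWER')
		assert conf.get_define('ANSWER') is None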
+ + :param configfile: path to the file to create (relative or absolute) + :type configfile: string + :param guard: include guard name to add, by default it is computed from the file name + :type guard: string + :param top: write the configuration header from the build directory (default is from the current path) + :type top: bool + :param defines: add the defines (yes by default) + :type defines: bool + :param headers: add #include in the file + :type headers: bool + :param remove: remove the defines after they are added (yes by default, works like in autoconf) + :type remove: bool + :type define_prefix: string + :param define_prefix: prefix all the defines in the file with a particular prefix + """ + if not configfile: + configfile = WAF_CONFIG_H + waf_guard = guard or 'W_%s_WAF' % Utils.quote_define_name(configfile) + + node = top and self.bldnode or self.path.get_bld() + node = node.make_node(configfile) + node.parent.mkdir() + + lst = ['/* WARNING! All changes made to this file will be lost! */\n'] + lst.append('#ifndef %s\n#define %s\n' % (waf_guard, waf_guard)) + lst.append(self.get_config_header(defines, headers, define_prefix=define_prefix)) + lst.append('\n#endif /* %s */\n' % waf_guard) + + node.write('\n'.join(lst)) + + # config files must not be removed on "waf clean" + self.env.append_unique(Build.CFG_FILES, [node.abspath()]) + + if remove: + for key in self.env[DEFKEYS]: + self.undefine(key) + self.env[DEFKEYS] = [] + +@conf +def get_config_header(self, defines=True, headers=False, define_prefix=''): + """ + Creates the contents of a ``config.h`` file from the defines and includes + set in conf.env.define_key / conf.env.include_key. No include guards are added. + + A prelude will be added from the variable env.WAF_CONFIG_H_PRELUDE if provided. 
This + can be used to insert complex macros or include guards:: + + def configure(conf): + conf.env.WAF_CONFIG_H_PRELUDE = '#include <unistd.h>\\n' + conf.write_config_header('config.h') + + :param defines: write the defines values + :type defines: bool + :param headers: write include entries for each element in self.env.INCKEYS + :type headers: bool + :type define_prefix: string + :param define_prefix: prefix all the defines with a particular prefix + :return: the contents of a ``config.h`` file + :rtype: string + """ + lst = [] + + if self.env.WAF_CONFIG_H_PRELUDE: + lst.append(self.env.WAF_CONFIG_H_PRELUDE) + + if headers: + for x in self.env[INCKEYS]: + lst.append('#include <%s>' % x) + + if defines: + tbl = {} + for k in self.env.DEFINES: + a, _, b = k.partition('=') + tbl[a] = b + + for k in self.env[DEFKEYS]: + caption = self.get_define_comment(k) + if caption: + caption = ' /* %s */' % caption + try: + txt = '#define %s%s %s%s' % (define_prefix, k, tbl[k], caption) + except KeyError: + txt = '/* #undef %s%s */%s' % (define_prefix, k, caption) + lst.append(txt) + return "\n".join(lst) + +@conf +def cc_add_flags(conf): + """ + Adds CFLAGS / CPPFLAGS from os.environ to conf.env + """ + conf.add_os_flags('CPPFLAGS', dup=False) + conf.add_os_flags('CFLAGS', dup=False) + +@conf +def cxx_add_flags(conf): + """ + Adds CXXFLAGS / CPPFLAGS from os.environ to conf.env + """ + conf.add_os_flags('CPPFLAGS', dup=False) + conf.add_os_flags('CXXFLAGS', dup=False) + +@conf +def link_add_flags(conf): + """ + Adds LINKFLAGS / LDFLAGS from os.environ to conf.env + """ + conf.add_os_flags('LINKFLAGS', dup=False) + conf.add_os_flags('LDFLAGS', dup=False) + +@conf +def cc_load_tools(conf): + """ + Loads the Waf c extensions + """ + if not conf.env.DEST_OS: + conf.env.DEST_OS = Utils.unversioned_sys_platform() + conf.load('c') + +@conf +def cxx_load_tools(conf): + """ + Loads the Waf c++ extensions + """ + if not conf.env.DEST_OS: + conf.env.DEST_OS = Utils.unversioned_sys_platform() + conf.load('cxx') + +@conf +def get_cc_version(conf, cc, gcc=False, icc=False, clang=False): + """ + Runs the preprocessor to determine the gcc/icc/clang version + + The variables CC_VERSION, DEST_OS, DEST_BINFMT and DEST_CPU will be set in *conf.env* + + :raise: :py:class:`waflib.Errors.ConfigurationError` + """ + cmd = cc + ['-dM', '-E', '-'] + env = conf.env.env or None + try: + out, err = conf.cmd_and_log(cmd, output=0, input='\n'.encode(), env=env) + except Errors.WafError: + conf.fatal('Could not determine the compiler version %r' % cmd) + + if gcc: + if out.find('__INTEL_COMPILER') >= 0: + conf.fatal('The intel compiler pretends to be gcc') + if out.find('__GNUC__') < 0 and out.find('__clang__') < 0: + conf.fatal('Could not determine the compiler type') + + if icc and out.find('__INTEL_COMPILER') < 0: + conf.fatal('Not icc/icpc') + + if clang and out.find('__clang__') < 0: + conf.fatal('Not clang/clang++') + if not clang and out.find('__clang__') >= 0: + conf.fatal('Could not find gcc/g++ (only Clang), if renamed try eg: CC=gcc48 CXX=g++48 waf configure') + + k = {} + if icc or gcc or clang: + out = out.splitlines() + for line in out: + lst = shlex.split(line) + if len(lst)>2: + key = lst[1] + val = lst[2] + k[key] = val + + def isD(var): + return var in k + + # Some documentation is available at http://predef.sourceforge.net + # The names given to DEST_OS must match what Utils.unversioned_sys_platform() returns. 
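+	# For instance, gcc on Linux is expected to predefine __linux__, which the
+	# MACRO_TO_DESTOS table maps to DEST_OS='linux'; the fallbacks below only
+	# apply when no known macro matched.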
+ if not conf.env.DEST_OS: + conf.env.DEST_OS = '' + for i in MACRO_TO_DESTOS: + if isD(i): + conf.env.DEST_OS = MACRO_TO_DESTOS[i] + break + else: + if isD('__APPLE__') and isD('__MACH__'): + conf.env.DEST_OS = 'darwin' + elif isD('__unix__'): # unix must be tested last as it's a generic fallback + conf.env.DEST_OS = 'generic' + + if isD('__ELF__'): + conf.env.DEST_BINFMT = 'elf' + elif isD('__WINNT__') or isD('__CYGWIN__') or isD('_WIN32'): + conf.env.DEST_BINFMT = 'pe' + if not conf.env.IMPLIBDIR: + conf.env.IMPLIBDIR = conf.env.LIBDIR # for .lib or .dll.a files + conf.env.LIBDIR = conf.env.BINDIR + elif isD('__APPLE__'): + conf.env.DEST_BINFMT = 'mac-o' + + if not conf.env.DEST_BINFMT: + # Infer the binary format from the os name. + conf.env.DEST_BINFMT = Utils.destos_to_binfmt(conf.env.DEST_OS) + + for i in MACRO_TO_DEST_CPU: + if isD(i): + conf.env.DEST_CPU = MACRO_TO_DEST_CPU[i] + break + + Logs.debug('ccroot: dest platform: ' + ' '.join([conf.env[x] or '?' for x in ('DEST_OS', 'DEST_BINFMT', 'DEST_CPU')])) + if icc: + ver = k['__INTEL_COMPILER'] + conf.env.CC_VERSION = (ver[:-2], ver[-2], ver[-1]) + else: + if isD('__clang__') and isD('__clang_major__'): + conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) + else: + # older clang versions and gcc + conf.env.CC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k.get('__GNUC_PATCHLEVEL__', '0')) + return k + +@conf +def get_xlc_version(conf, cc): + """ + Returns the Aix compiler version + + :raise: :py:class:`waflib.Errors.ConfigurationError` + """ + cmd = cc + ['-qversion'] + try: + out, err = conf.cmd_and_log(cmd, output=0) + except Errors.WafError: + conf.fatal('Could not find xlc %r' % cmd) + + # the intention is to catch the 8.0 in "IBM XL C/C++ Enterprise Edition V8.0 for AIX..." + for v in (r"IBM XL C/C\+\+.* V(?P<major>\d*)\.(?P<minor>\d*)",): + version_re = re.compile(v, re.I).search + match = version_re(out or err) + if match: + k = match.groupdict() + conf.env.CC_VERSION = (k['major'], k['minor']) + break + else: + conf.fatal('Could not determine the XLC version.') + +@conf +def get_suncc_version(conf, cc): + """ + Returns the Sun compiler version + + :raise: :py:class:`waflib.Errors.ConfigurationError` + """ + cmd = cc + ['-V'] + try: + out, err = conf.cmd_and_log(cmd, output=0) + except Errors.WafError as e: + # Older versions of the compiler exit with non-zero status when reporting their version + if not (hasattr(e, 'returncode') and hasattr(e, 'stdout') and hasattr(e, 'stderr')): + conf.fatal('Could not find suncc %r' % cmd) + out = e.stdout + err = e.stderr + + version = (out or err) + version = version.splitlines()[0] + + # cc: Sun C 5.10 SunOS_i386 2009/06/03 + # cc: Studio 12.5 Sun C++ 5.14 SunOS_sparc Beta 2015/11/17 + # cc: WorkShop Compilers 5.0 98/12/15 C 5.0 + version_re = re.compile(r'cc: (studio.*?|\s+)?(sun\s+(c\+\+|c)|(WorkShop\s+Compilers))?\s+(?P<major>\d*)\.(?P<minor>\d*)', re.I).search + match = version_re(version) + if match: + k = match.groupdict() + conf.env.CC_VERSION = (k['major'], k['minor']) + else: + conf.fatal('Could not determine the suncc version.') + +# ============ the --as-needed flag should added during the configuration, not at runtime ========= + +@conf +def add_as_needed(self): + """ + Adds ``--as-needed`` to the *LINKFLAGS* + On some platforms, it is a default flag. In some cases (e.g., in NS-3) it is necessary to explicitly disable this feature with `-Wl,--no-as-needed` flag. 
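+
+	Typical use::
+
+		def configure(conf):
+			conf.load('compiler_c')
+			conf.add_as_needed()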
+ """ + if self.env.DEST_BINFMT == 'elf' and 'gcc' in (self.env.CXX_NAME, self.env.CC_NAME): + self.env.append_unique('LINKFLAGS', '-Wl,--as-needed') + +# ============ parallel configuration + +class cfgtask(Task.Task): + """ + A task that executes build configuration tests (calls conf.check) + + Make sure to use locks if concurrent access to the same conf.env data is necessary. + """ + def __init__(self, *k, **kw): + Task.Task.__init__(self, *k, **kw) + self.run_after = set() + + def display(self): + return '' + + def runnable_status(self): + for x in self.run_after: + if not x.hasrun: + return Task.ASK_LATER + return Task.RUN_ME + + def uid(self): + return Utils.SIG_NIL + + def signature(self): + return Utils.SIG_NIL + + def run(self): + conf = self.conf + bld = Build.BuildContext(top_dir=conf.srcnode.abspath(), out_dir=conf.bldnode.abspath()) + bld.env = conf.env + bld.init_dirs() + bld.in_msg = 1 # suppress top-level start_msg + bld.logger = self.logger + bld.multicheck_task = self + args = self.args + try: + if 'func' in args: + bld.test(build_fun=args['func'], + msg=args.get('msg', ''), + okmsg=args.get('okmsg', ''), + errmsg=args.get('errmsg', ''), + ) + else: + args['multicheck_mandatory'] = args.get('mandatory', True) + args['mandatory'] = True + try: + bld.check(**args) + finally: + args['mandatory'] = args['multicheck_mandatory'] + except Exception: + return 1 + + def process(self): + Task.Task.process(self) + if 'msg' in self.args: + with self.generator.bld.multicheck_lock: + self.conf.start_msg(self.args['msg']) + if self.hasrun == Task.NOT_RUN: + self.conf.end_msg('test cancelled', 'YELLOW') + elif self.hasrun != Task.SUCCESS: + self.conf.end_msg(self.args.get('errmsg', 'no'), 'YELLOW') + else: + self.conf.end_msg(self.args.get('okmsg', 'yes'), 'GREEN') + +@conf +def multicheck(self, *k, **kw): + """ + Runs configuration tests in parallel; results are printed sequentially at the end of the build + but each test must provide its own msg value to display a line:: + + def test_build(ctx): + ctx.in_msg = True # suppress console outputs + ctx.check_large_file(mandatory=False) + + conf.multicheck( + {'header_name':'stdio.h', 'msg':'... stdio', 'uselib_store':'STDIO', 'global_define':False}, + {'header_name':'xyztabcd.h', 'msg':'... optional xyztabcd.h', 'mandatory': False}, + {'header_name':'stdlib.h', 'msg':'... stdlib', 'okmsg': 'aye', 'errmsg': 'nope'}, + {'func': test_build, 'msg':'... testing an arbitrary build function', 'okmsg':'ok'}, + msg = 'Checking for headers in parallel', + mandatory = True, # mandatory tests raise an error at the end + run_all_tests = True, # try running all tests + ) + + The configuration tests may modify the values in conf.env in any order, and the define + values can affect configuration tests being executed. It is hence recommended + to provide `uselib_store` values with `global_define=False` to prevent such issues. 
+ """ + self.start_msg(kw.get('msg', 'Executing %d configuration tests' % len(k)), **kw) + + # Force a copy so that threads append to the same list at least + # no order is guaranteed, but the values should not disappear at least + for var in ('DEFINES', DEFKEYS): + self.env.append_value(var, []) + self.env.DEFINE_COMMENTS = self.env.DEFINE_COMMENTS or {} + + # define a task object that will execute our tests + class par(object): + def __init__(self): + self.keep = False + self.task_sigs = {} + self.progress_bar = 0 + def total(self): + return len(tasks) + def to_log(self, *k, **kw): + return + + bld = par() + bld.keep = kw.get('run_all_tests', True) + bld.imp_sigs = {} + tasks = [] + + id_to_task = {} + for counter, dct in enumerate(k): + x = Task.classes['cfgtask'](bld=bld, env=None) + tasks.append(x) + x.args = dct + x.args['multicheck_counter'] = counter + x.bld = bld + x.conf = self + x.args = dct + + # bind a logger that will keep the info in memory + x.logger = Logs.make_mem_logger(str(id(x)), self.logger) + + if 'id' in dct: + id_to_task[dct['id']] = x + + # second pass to set dependencies with after_test/before_test + for x in tasks: + for key in Utils.to_list(x.args.get('before_tests', [])): + tsk = id_to_task[key] + if not tsk: + raise ValueError('No test named %r' % key) + tsk.run_after.add(x) + for key in Utils.to_list(x.args.get('after_tests', [])): + tsk = id_to_task[key] + if not tsk: + raise ValueError('No test named %r' % key) + x.run_after.add(tsk) + + def it(): + yield tasks + while 1: + yield [] + bld.producer = p = Runner.Parallel(bld, Options.options.jobs) + bld.multicheck_lock = Utils.threading.Lock() + p.biter = it() + + self.end_msg('started') + p.start() + + # flush the logs in order into the config.log + for x in tasks: + x.logger.memhandler.flush() + + self.start_msg('-> processing test results') + if p.error: + for x in p.error: + if getattr(x, 'err_msg', None): + self.to_log(x.err_msg) + self.end_msg('fail', color='RED') + raise Errors.WafError('There is an error in the library, read config.log for more information') + + failure_count = 0 + for x in tasks: + if x.hasrun not in (Task.SUCCESS, Task.NOT_RUN): + failure_count += 1 + + if failure_count: + self.end_msg(kw.get('errmsg', '%s test failed' % failure_count), color='YELLOW', **kw) + else: + self.end_msg('all ok', **kw) + + for x in tasks: + if x.hasrun != Task.SUCCESS: + if x.args.get('mandatory', True): + self.fatal(kw.get('fatalmsg') or 'One of the tests has failed, read config.log for more information') + +@conf +def check_gcc_o_space(self, mode='c'): + if int(self.env.CC_VERSION[0]) > 4: + # this is for old compilers + return + self.env.stash() + if mode == 'c': + self.env.CCLNK_TGT_F = ['-o', ''] + elif mode == 'cxx': + self.env.CXXLNK_TGT_F = ['-o', ''] + features = '%s %sshlib' % (mode, mode) + try: + self.check(msg='Checking if the -o link must be split from arguments', fragment=SNIP_EMPTY_PROGRAM, features=features) + except self.errors.ConfigurationError: + self.env.revert() + else: + self.env.commit() + diff --git a/third_party/waf/waflib/Tools/c_osx.py b/third_party/waf/waflib/Tools/c_osx.py new file mode 100644 index 0000000..f70b128 --- /dev/null +++ b/third_party/waf/waflib/Tools/c_osx.py @@ -0,0 +1,193 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy 2008-2018 (ita) + +""" +MacOSX related tools +""" + +import os, shutil, platform +from waflib import Task, Utils +from waflib.TaskGen import taskgen_method, feature, after_method, before_method + +app_info = ''' +<?xml 
version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist SYSTEM "file://localhost/System/Library/DTDs/PropertyList.dtd"> +<plist version="0.9"> +<dict> + <key>CFBundlePackageType</key> + <string>APPL</string> + <key>CFBundleGetInfoString</key> + <string>Created by Waf</string> + <key>CFBundleSignature</key> + <string>????</string> + <key>NOTE</key> + <string>THIS IS A GENERATED FILE, DO NOT MODIFY</string> + <key>CFBundleExecutable</key> + <string>{app_name}</string> +</dict> +</plist> +''' +""" +plist template +""" + +@feature('c', 'cxx') +def set_macosx_deployment_target(self): + """ + see WAF issue 285 and also and also http://trac.macports.org/ticket/17059 + """ + if self.env.MACOSX_DEPLOYMENT_TARGET: + os.environ['MACOSX_DEPLOYMENT_TARGET'] = self.env.MACOSX_DEPLOYMENT_TARGET + elif 'MACOSX_DEPLOYMENT_TARGET' not in os.environ: + if Utils.unversioned_sys_platform() == 'darwin': + os.environ['MACOSX_DEPLOYMENT_TARGET'] = '.'.join(platform.mac_ver()[0].split('.')[:2]) + +@taskgen_method +def create_bundle_dirs(self, name, out): + """ + Creates bundle folders, used by :py:func:`create_task_macplist` and :py:func:`create_task_macapp` + """ + dir = out.parent.find_or_declare(name) + dir.mkdir() + macos = dir.find_or_declare(['Contents', 'MacOS']) + macos.mkdir() + return dir + +def bundle_name_for_output(out): + name = out.name + k = name.rfind('.') + if k >= 0: + name = name[:k] + '.app' + else: + name = name + '.app' + return name + +@feature('cprogram', 'cxxprogram') +@after_method('apply_link') +def create_task_macapp(self): + """ + To compile an executable into a Mac application (a .app), set its *mac_app* attribute:: + + def build(bld): + bld.shlib(source='a.c', target='foo', mac_app=True) + + To force *all* executables to be transformed into Mac applications:: + + def build(bld): + bld.env.MACAPP = True + bld.shlib(source='a.c', target='foo') + """ + if self.env.MACAPP or getattr(self, 'mac_app', False): + out = self.link_task.outputs[0] + + name = bundle_name_for_output(out) + dir = self.create_bundle_dirs(name, out) + + n1 = dir.find_or_declare(['Contents', 'MacOS', out.name]) + + self.apptask = self.create_task('macapp', self.link_task.outputs, n1) + inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/MacOS/' % name + self.add_install_files(install_to=inst_to, install_from=n1, chmod=Utils.O755) + + if getattr(self, 'mac_files', None): + # this only accepts files; they will be installed as seen from mac_files_root + mac_files_root = getattr(self, 'mac_files_root', None) + if isinstance(mac_files_root, str): + mac_files_root = self.path.find_node(mac_files_root) + if not mac_files_root: + self.bld.fatal('Invalid mac_files_root %r' % self.mac_files_root) + res_dir = n1.parent.parent.make_node('Resources') + inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Resources' % name + for node in self.to_nodes(self.mac_files): + relpath = node.path_from(mac_files_root or node.parent) + self.create_task('macapp', node, res_dir.make_node(relpath)) + self.add_install_as(install_to=os.path.join(inst_to, relpath), install_from=node) + + if getattr(self.bld, 'is_install', None): + # disable regular binary installation + self.install_task.hasrun = Task.SKIP_ME + +@feature('cprogram', 'cxxprogram') +@after_method('apply_link') +def create_task_macplist(self): + """ + Creates a :py:class:`waflib.Tools.c_osx.macplist` instance. 
+ """ + if self.env.MACAPP or getattr(self, 'mac_app', False): + out = self.link_task.outputs[0] + + name = bundle_name_for_output(out) + + dir = self.create_bundle_dirs(name, out) + n1 = dir.find_or_declare(['Contents', 'Info.plist']) + self.plisttask = plisttask = self.create_task('macplist', [], n1) + plisttask.context = { + 'app_name': self.link_task.outputs[0].name, + 'env': self.env + } + + plist_ctx = getattr(self, 'plist_context', None) + if (plist_ctx): + plisttask.context.update(plist_ctx) + + if getattr(self, 'mac_plist', False): + node = self.path.find_resource(self.mac_plist) + if node: + plisttask.inputs.append(node) + else: + plisttask.code = self.mac_plist + else: + plisttask.code = app_info + + inst_to = getattr(self, 'install_path', '/Applications') + '/%s/Contents/' % name + self.add_install_files(install_to=inst_to, install_from=n1) + +@feature('cshlib', 'cxxshlib') +@before_method('apply_link', 'propagate_uselib_vars') +def apply_bundle(self): + """ + To make a bundled shared library (a ``.bundle``), set the *mac_bundle* attribute:: + + def build(bld): + bld.shlib(source='a.c', target='foo', mac_bundle = True) + + To force *all* executables to be transformed into bundles:: + + def build(bld): + bld.env.MACBUNDLE = True + bld.shlib(source='a.c', target='foo') + """ + if self.env.MACBUNDLE or getattr(self, 'mac_bundle', False): + self.env.LINKFLAGS_cshlib = self.env.LINKFLAGS_cxxshlib = [] # disable the '-dynamiclib' flag + self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN + use = self.use = self.to_list(getattr(self, 'use', [])) + if not 'MACBUNDLE' in use: + use.append('MACBUNDLE') + +app_dirs = ['Contents', 'Contents/MacOS', 'Contents/Resources'] + +class macapp(Task.Task): + """ + Creates mac applications + """ + color = 'PINK' + def run(self): + self.outputs[0].parent.mkdir() + shutil.copy2(self.inputs[0].srcpath(), self.outputs[0].abspath()) + +class macplist(Task.Task): + """ + Creates plist files + """ + color = 'PINK' + ext_in = ['.bin'] + def run(self): + if getattr(self, 'code', None): + txt = self.code + else: + txt = self.inputs[0].read() + context = getattr(self, 'context', {}) + txt = txt.format(**context) + self.outputs[0].write(txt) + diff --git a/third_party/waf/waflib/Tools/c_preproc.py b/third_party/waf/waflib/Tools/c_preproc.py new file mode 100644 index 0000000..68e5f5a --- /dev/null +++ b/third_party/waf/waflib/Tools/c_preproc.py @@ -0,0 +1,1091 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) + +""" +C/C++ preprocessor for finding dependencies + +Reasons for using the Waf preprocessor by default + +#. Some c/c++ extensions (Qt) require a custom preprocessor for obtaining the dependencies (.moc files) +#. Not all compilers provide .d files for obtaining the dependencies (portability) +#. A naive file scanner will not catch the constructs such as "#include foo()" +#. A naive file scanner will catch unnecessary dependencies (change an unused header -> recompile everything) + +Regarding the speed concerns: + +* the preprocessing is performed only when files must be compiled +* the macros are evaluated only for #if/#elif/#include +* system headers are not scanned by default + +Now if you do not want the Waf preprocessor, the tool +gccdeps* uses the .d files produced +during the compilation to track the dependencies (useful when used with the boost libraries). +It only works with gcc >= 4.4 though. 
+ +A dumb preprocessor is also available in the tool *c_dumbpreproc* +""" +# TODO: more varargs, pragma once + +import re, string, traceback +from waflib import Logs, Utils, Errors + +class PreprocError(Errors.WafError): + pass + +FILE_CACHE_SIZE = 100000 +LINE_CACHE_SIZE = 100000 + +POPFILE = '-' +"Constant representing a special token used in :py:meth:`waflib.Tools.c_preproc.c_parser.start` iteration to switch to a header read previously" + +recursion_limit = 150 +"Limit on the amount of files to read in the dependency scanner" + +go_absolute = False +"Set to True to track headers on files in /usr/include, else absolute paths are ignored (but it becomes very slow)" + +standard_includes = ['/usr/local/include', '/usr/include'] +if Utils.is_win32: + standard_includes = [] + +use_trigraphs = 0 +"""Apply trigraph rules (False by default)""" + +# obsolete, do not use +strict_quotes = 0 + +g_optrans = { +'not':'!', +'not_eq':'!', +'and':'&&', +'and_eq':'&=', +'or':'||', +'or_eq':'|=', +'xor':'^', +'xor_eq':'^=', +'bitand':'&', +'bitor':'|', +'compl':'~', +} +"""Operators such as and/or/xor for c++. Set an empty dict to disable.""" + +# ignore #warning and #error +re_lines = re.compile( + '^[ \t]*(?:#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$', + re.IGNORECASE | re.MULTILINE) +"""Match #include lines""" + +re_mac = re.compile(r"^[a-zA-Z_]\w*") +"""Match macro definitions""" + +re_fun = re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]') +"""Match macro functions""" + +re_pragma_once = re.compile(r'^\s*once\s*', re.IGNORECASE) +"""Match #pragma once statements""" + +re_nl = re.compile('\\\\\r*\n', re.MULTILINE) +"""Match newlines""" + +re_cpp = re.compile(r'//.*?$|/\*.*?\*/|\'(?:\\.|[^\\\'])*\'|"(?:\\.|[^\\"])*"', re.DOTALL | re.MULTILINE ) +"""Filter C/C++ comments""" + +trig_def = [('??'+a, b) for a, b in zip("=-/!'()<>", r'#~\|^[]{}')] +"""Trigraph definitions""" + +chr_esc = {'0':0, 'a':7, 'b':8, 't':9, 'n':10, 'f':11, 'v':12, 'r':13, '\\':92, "'":39} +"""Escape characters""" + +NUM = 'i' +"""Number token""" + +OP = 'O' +"""Operator token""" + +IDENT = 'T' +"""Identifier token""" + +STR = 's' +"""String token""" + +CHAR = 'c' +"""Character token""" + +tok_types = [NUM, STR, IDENT, OP] +"""Token types""" + +exp_types = [ + r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""", + r'L?"([^"\\]|\\.)*"', + r'[a-zA-Z_]\w*', + r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]', +] +"""Expression types""" + +re_clexer = re.compile('|'.join(["(?P<%s>%s)" % (name, part) for name, part in zip(tok_types, exp_types)]), re.M) +"""Match expressions into tokens""" + +accepted = 'a' +"""Parser state is *accepted*""" + +ignored = 'i' +"""Parser state is *ignored*, for example preprocessor lines in an #if 0 block""" + +undefined = 'u' +"""Parser state is *undefined* at the moment""" + +skipped = 's' +"""Parser state is *skipped*, for example preprocessor lines in a #elif 0 block""" + +def repl(m): + """Replace function used with :py:attr:`waflib.Tools.c_preproc.re_cpp`""" + s = m.group() + if s[0] == '/': + return ' ' + return s + +prec = {} +""" +Operator precedence rules required for parsing expressions of the form:: + + #if 1 && 2 
!= 0 +""" +ops = ['* / %', '+ -', '<< >>', '< <= >= >', '== !=', '& | ^', '&& ||', ','] +for x, syms in enumerate(ops): + for u in syms.split(): + prec[u] = x + +def reduce_nums(val_1, val_2, val_op): + """ + Apply arithmetic rules to compute a result + + :param val1: input parameter + :type val1: int or string + :param val2: input parameter + :type val2: int or string + :param val_op: C operator in *+*, */*, *-*, etc + :type val_op: string + :rtype: int + """ + #print val_1, val_2, val_op + + # now perform the operation, make certain a and b are numeric + try: + a = 0 + val_1 + except TypeError: + a = int(val_1) + try: + b = 0 + val_2 + except TypeError: + b = int(val_2) + + d = val_op + if d == '%': + c = a % b + elif d=='+': + c = a + b + elif d=='-': + c = a - b + elif d=='*': + c = a * b + elif d=='/': + c = a / b + elif d=='^': + c = a ^ b + elif d=='==': + c = int(a == b) + elif d=='|' or d == 'bitor': + c = a | b + elif d=='||' or d == 'or' : + c = int(a or b) + elif d=='&' or d == 'bitand': + c = a & b + elif d=='&&' or d == 'and': + c = int(a and b) + elif d=='!=' or d == 'not_eq': + c = int(a != b) + elif d=='^' or d == 'xor': + c = int(a^b) + elif d=='<=': + c = int(a <= b) + elif d=='<': + c = int(a < b) + elif d=='>': + c = int(a > b) + elif d=='>=': + c = int(a >= b) + elif d=='<<': + c = a << b + elif d=='>>': + c = a >> b + else: + c = 0 + return c + +def get_num(lst): + """ + Try to obtain a number from a list of tokens. The token types are defined in :py:attr:`waflib.Tools.ccroot.tok_types`. + + :param lst: list of preprocessor tokens + :type lst: list of tuple (tokentype, value) + :return: a pair containing the number and the rest of the list + :rtype: tuple(value, list) + """ + if not lst: + raise PreprocError('empty list for get_num') + (p, v) = lst[0] + if p == OP: + if v == '(': + count_par = 1 + i = 1 + while i < len(lst): + (p, v) = lst[i] + + if p == OP: + if v == ')': + count_par -= 1 + if count_par == 0: + break + elif v == '(': + count_par += 1 + i += 1 + else: + raise PreprocError('rparen expected %r' % lst) + + (num, _) = get_term(lst[1:i]) + return (num, lst[i+1:]) + + elif v == '+': + return get_num(lst[1:]) + elif v == '-': + num, lst = get_num(lst[1:]) + return (reduce_nums('-1', num, '*'), lst) + elif v == '!': + num, lst = get_num(lst[1:]) + return (int(not int(num)), lst) + elif v == '~': + num, lst = get_num(lst[1:]) + return (~ int(num), lst) + else: + raise PreprocError('Invalid op token %r for get_num' % lst) + elif p == NUM: + return v, lst[1:] + elif p == IDENT: + # all macros should have been replaced, remaining identifiers eval to 0 + return 0, lst[1:] + else: + raise PreprocError('Invalid token %r for get_num' % lst) + +def get_term(lst): + """ + Evaluate an expression recursively, for example:: + + 1+1+1 -> 2+1 -> 3 + + :param lst: list of tokens + :type lst: list of tuple(token, value) + :return: the value and the remaining tokens + :rtype: value, list + """ + + if not lst: + raise PreprocError('empty list for get_term') + num, lst = get_num(lst) + if not lst: + return (num, []) + (p, v) = lst[0] + if p == OP: + if v == ',': + # skip + return get_term(lst[1:]) + elif v == '?': + count_par = 0 + i = 1 + while i < len(lst): + (p, v) = lst[i] + + if p == OP: + if v == ')': + count_par -= 1 + elif v == '(': + count_par += 1 + elif v == ':': + if count_par == 0: + break + i += 1 + else: + raise PreprocError('rparen expected %r' % lst) + + if int(num): + return get_term(lst[1:i]) + else: + return get_term(lst[i+1:]) + + else: + num2, lst = 
get_num(lst[1:]) + + if not lst: + # no more tokens to process + num2 = reduce_nums(num, num2, v) + return get_term([(NUM, num2)] + lst) + + # operator precedence + p2, v2 = lst[0] + if p2 != OP: + raise PreprocError('op expected %r' % lst) + + if prec[v2] >= prec[v]: + num2 = reduce_nums(num, num2, v) + return get_term([(NUM, num2)] + lst) + else: + num3, lst = get_num(lst[1:]) + num3 = reduce_nums(num2, num3, v2) + return get_term([(NUM, num), (p, v), (NUM, num3)] + lst) + + + raise PreprocError('cannot reduce %r' % lst) + +def reduce_eval(lst): + """ + Take a list of tokens and output true or false for #if/#elif conditions. + + :param lst: a list of tokens + :type lst: list of tuple(token, value) + :return: a token + :rtype: tuple(NUM, int) + """ + num, lst = get_term(lst) + return (NUM, num) + +def stringize(lst): + """ + Merge a list of tokens into a string + + :param lst: a list of tokens + :type lst: list of tuple(token, value) + :rtype: string + """ + lst = [str(v2) for (p2, v2) in lst] + return "".join(lst) + +def paste_tokens(t1, t2): + """ + Token pasting works between identifiers, particular operators, and identifiers and numbers:: + + a ## b -> ab + > ## = -> >= + a ## 2 -> a2 + + :param t1: token + :type t1: tuple(type, value) + :param t2: token + :type t2: tuple(type, value) + """ + p1 = None + if t1[0] == OP and t2[0] == OP: + p1 = OP + elif t1[0] == IDENT and (t2[0] == IDENT or t2[0] == NUM): + p1 = IDENT + elif t1[0] == NUM and t2[0] == NUM: + p1 = NUM + if not p1: + raise PreprocError('tokens do not make a valid paste %r and %r' % (t1, t2)) + return (p1, t1[1] + t2[1]) + +def reduce_tokens(lst, defs, ban=[]): + """ + Replace the tokens in lst, using the macros provided in defs, and a list of macros that cannot be re-applied + + :param lst: list of tokens + :type lst: list of tuple(token, value) + :param defs: macro definitions + :type defs: dict + :param ban: macros that cannot be substituted (recursion is not allowed) + :type ban: list of string + :return: the new list of tokens + :rtype: value, list + """ + + i = 0 + while i < len(lst): + (p, v) = lst[i] + + if p == IDENT and v == "defined": + del lst[i] + if i < len(lst): + (p2, v2) = lst[i] + if p2 == IDENT: + if v2 in defs: + lst[i] = (NUM, 1) + else: + lst[i] = (NUM, 0) + elif p2 == OP and v2 == '(': + del lst[i] + (p2, v2) = lst[i] + del lst[i] # remove the ident, and change the ) for the value + if v2 in defs: + lst[i] = (NUM, 1) + else: + lst[i] = (NUM, 0) + else: + raise PreprocError('Invalid define expression %r' % lst) + + elif p == IDENT and v in defs: + + if isinstance(defs[v], str): + a, b = extract_macro(defs[v]) + defs[v] = b + macro_def = defs[v] + to_add = macro_def[1] + + if isinstance(macro_def[0], list): + # macro without arguments + del lst[i] + accu = to_add[:] + reduce_tokens(accu, defs, ban+[v]) + for tmp in accu: + lst.insert(i, tmp) + i += 1 + else: + # collect the arguments for the funcall + + args = [] + del lst[i] + + if i >= len(lst): + raise PreprocError('expected ( after %r (got nothing)' % v) + + (p2, v2) = lst[i] + if p2 != OP or v2 != '(': + raise PreprocError('expected ( after %r' % v) + + del lst[i] + + one_param = [] + count_paren = 0 + while i < len(lst): + p2, v2 = lst[i] + + del lst[i] + if p2 == OP and count_paren == 0: + if v2 == '(': + one_param.append((p2, v2)) + count_paren += 1 + elif v2 == ')': + if one_param: + args.append(one_param) + break + elif v2 == ',': + if not one_param: + raise PreprocError('empty param in funcall %r' % v) + args.append(one_param) + one_param = 
[] + else: + one_param.append((p2, v2)) + else: + one_param.append((p2, v2)) + if v2 == '(': + count_paren += 1 + elif v2 == ')': + count_paren -= 1 + else: + raise PreprocError('malformed macro') + + # substitute the arguments within the define expression + accu = [] + arg_table = macro_def[0] + j = 0 + while j < len(to_add): + (p2, v2) = to_add[j] + + if p2 == OP and v2 == '#': + # stringize is for arguments only + if j+1 < len(to_add) and to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: + toks = args[arg_table[to_add[j+1][1]]] + accu.append((STR, stringize(toks))) + j += 1 + else: + accu.append((p2, v2)) + elif p2 == OP and v2 == '##': + # token pasting, how can man invent such a complicated system? + if accu and j+1 < len(to_add): + # we have at least two tokens + + t1 = accu[-1] + + if to_add[j+1][0] == IDENT and to_add[j+1][1] in arg_table: + toks = args[arg_table[to_add[j+1][1]]] + + if toks: + accu[-1] = paste_tokens(t1, toks[0]) #(IDENT, accu[-1][1] + toks[0][1]) + accu.extend(toks[1:]) + else: + # error, case "a##" + accu.append((p2, v2)) + accu.extend(toks) + elif to_add[j+1][0] == IDENT and to_add[j+1][1] == '__VA_ARGS__': + # first collect the tokens + va_toks = [] + st = len(macro_def[0]) + pt = len(args) + for x in args[pt-st+1:]: + va_toks.extend(x) + va_toks.append((OP, ',')) + if va_toks: + va_toks.pop() # extra comma + if len(accu)>1: + (p3, v3) = accu[-1] + (p4, v4) = accu[-2] + if v3 == '##': + # remove the token paste + accu.pop() + if v4 == ',' and pt < st: + # remove the comma + accu.pop() + accu += va_toks + else: + accu[-1] = paste_tokens(t1, to_add[j+1]) + + j += 1 + else: + # Invalid paste, case "##a" or "b##" + accu.append((p2, v2)) + + elif p2 == IDENT and v2 in arg_table: + toks = args[arg_table[v2]] + reduce_tokens(toks, defs, ban+[v]) + accu.extend(toks) + else: + accu.append((p2, v2)) + + j += 1 + + + reduce_tokens(accu, defs, ban+[v]) + + for x in range(len(accu)-1, -1, -1): + lst.insert(i, accu[x]) + + i += 1 + + +def eval_macro(lst, defs): + """ + Reduce the tokens by :py:func:`waflib.Tools.c_preproc.reduce_tokens` and try to return a 0/1 result by :py:func:`waflib.Tools.c_preproc.reduce_eval`. + + :param lst: list of tokens + :type lst: list of tuple(token, value) + :param defs: macro definitions + :type defs: dict + :rtype: int + """ + reduce_tokens(lst, defs, []) + if not lst: + raise PreprocError('missing tokens to evaluate') + + if lst: + p, v = lst[0] + if p == IDENT and v not in defs: + raise PreprocError('missing macro %r' % lst) + + p, v = reduce_eval(lst) + return int(v) != 0 + +def extract_macro(txt): + """ + Process a macro definition of the form:: + #define f(x, y) x * y + + into a function or a simple macro without arguments + + :param txt: expression to exact a macro definition from + :type txt: string + :return: a tuple containing the name, the list of arguments and the replacement + :rtype: tuple(string, [list, list]) + """ + t = tokenize(txt) + if re_fun.search(txt): + p, name = t[0] + + p, v = t[1] + if p != OP: + raise PreprocError('expected (') + + i = 1 + pindex = 0 + params = {} + prev = '(' + + while 1: + i += 1 + p, v = t[i] + + if prev == '(': + if p == IDENT: + params[v] = pindex + pindex += 1 + prev = p + elif p == OP and v == ')': + break + else: + raise PreprocError('unexpected token (3)') + elif prev == IDENT: + if p == OP and v == ',': + prev = v + elif p == OP and v == ')': + break + else: + raise PreprocError('comma or ... 
expected') + elif prev == ',': + if p == IDENT: + params[v] = pindex + pindex += 1 + prev = p + elif p == OP and v == '...': + raise PreprocError('not implemented (1)') + else: + raise PreprocError('comma or ... expected (2)') + elif prev == '...': + raise PreprocError('not implemented (2)') + else: + raise PreprocError('unexpected else') + + #~ print (name, [params, t[i+1:]]) + return (name, [params, t[i+1:]]) + else: + (p, v) = t[0] + if len(t) > 1: + return (v, [[], t[1:]]) + else: + # empty define, assign an empty token + return (v, [[], [('T','')]]) + +re_include = re.compile(r'^\s*(<(?:.*)>|"(?:.*)")') +def extract_include(txt, defs): + """ + Process a line in the form:: + + #include foo + + :param txt: include line to process + :type txt: string + :param defs: macro definitions + :type defs: dict + :return: the file name + :rtype: string + """ + m = re_include.search(txt) + if m: + txt = m.group(1) + return txt[0], txt[1:-1] + + # perform preprocessing and look at the result, it must match an include + toks = tokenize(txt) + reduce_tokens(toks, defs, ['waf_include']) + + if not toks: + raise PreprocError('could not parse include %r' % txt) + + if len(toks) == 1: + if toks[0][0] == STR: + return '"', toks[0][1] + else: + if toks[0][1] == '<' and toks[-1][1] == '>': + ret = '<', stringize(toks).lstrip('<').rstrip('>') + return ret + + raise PreprocError('could not parse include %r' % txt) + +def parse_char(txt): + """ + Parse a c character + + :param txt: character to parse + :type txt: string + :return: a character literal + :rtype: string + """ + + if not txt: + raise PreprocError('attempted to parse a null char') + if txt[0] != '\\': + return ord(txt) + c = txt[1] + if c == 'x': + if len(txt) == 4 and txt[3] in string.hexdigits: + return int(txt[2:], 16) + return int(txt[2:], 16) + elif c.isdigit(): + if c == '0' and len(txt)==2: + return 0 + for i in 3, 2, 1: + if len(txt) > i and txt[1:1+i].isdigit(): + return (1+i, int(txt[1:1+i], 8)) + else: + try: + return chr_esc[c] + except KeyError: + raise PreprocError('could not parse char literal %r' % txt) + +def tokenize(s): + """ + Convert a string into a list of tokens (shlex.split does not apply to c/c++/d) + + :param s: input to tokenize + :type s: string + :return: a list of tokens + :rtype: list of tuple(token, value) + """ + return tokenize_private(s)[:] # force a copy of the results + +def tokenize_private(s): + ret = [] + for match in re_clexer.finditer(s): + m = match.group + for name in tok_types: + v = m(name) + if v: + if name == IDENT: + if v in g_optrans: + name = OP + elif v.lower() == "true": + v = 1 + name = NUM + elif v.lower() == "false": + v = 0 + name = NUM + elif name == NUM: + if m('oct'): + v = int(v, 8) + elif m('hex'): + v = int(m('hex'), 16) + elif m('n0'): + v = m('n0') + else: + v = m('char') + if v: + v = parse_char(v) + else: + v = m('n2') or m('n4') + elif name == OP: + if v == '%:': + v = '#' + elif v == '%:%:': + v = '##' + elif name == STR: + # remove the quotes around the string + v = v[1:-1] + ret.append((name, v)) + break + return ret + +def format_defines(lst): + ret = [] + for y in lst: + if y: + pos = y.find('=') + if pos == -1: + # "-DFOO" should give "#define FOO 1" + ret.append(y) + elif pos > 0: + # all others are assumed to be -DX=Y + ret.append('%s %s' % (y[:pos], y[pos+1:])) + else: + raise ValueError('Invalid define expression %r' % y) + return ret + +class c_parser(object): + """ + Used by :py:func:`waflib.Tools.c_preproc.scan` to parse c/h files. 
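+
+	A standalone sketch; ``node``, ``env`` and ``include_nodes`` below are
+	placeholders for a file node, a ConfigSet and a list of include folders::
+
+		parser = c_parser(nodepaths=include_nodes)
+		parser.start(node, env)
+		found, missing = parser.nodes, parser.names
+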
Note that by default, + only project headers are parsed. + """ + def __init__(self, nodepaths=None, defines=None): + self.lines = [] + """list of lines read""" + + if defines is None: + self.defs = {} + else: + self.defs = dict(defines) # make a copy + self.state = [] + + self.count_files = 0 + self.currentnode_stack = [] + + self.nodepaths = nodepaths or [] + """Include paths""" + + self.nodes = [] + """List of :py:class:`waflib.Node.Node` found so far""" + + self.names = [] + """List of file names that could not be matched by any file""" + + self.curfile = '' + """Current file""" + + self.ban_includes = set() + """Includes that must not be read (#pragma once)""" + + self.listed = set() + """Include nodes/names already listed to avoid duplicates in self.nodes/self.names""" + + def cached_find_resource(self, node, filename): + """ + Find a file from the input directory + + :param node: directory + :type node: :py:class:`waflib.Node.Node` + :param filename: header to find + :type filename: string + :return: the node if found, or None + :rtype: :py:class:`waflib.Node.Node` + """ + try: + cache = node.ctx.preproc_cache_node + except AttributeError: + cache = node.ctx.preproc_cache_node = Utils.lru_cache(FILE_CACHE_SIZE) + + key = (node, filename) + try: + return cache[key] + except KeyError: + ret = node.find_resource(filename) + if ret: + if getattr(ret, 'children', None): + ret = None + elif ret.is_child_of(node.ctx.bldnode): + tmp = node.ctx.srcnode.search_node(ret.path_from(node.ctx.bldnode)) + if tmp and getattr(tmp, 'children', None): + ret = None + cache[key] = ret + return ret + + def tryfind(self, filename, kind='"', env=None): + """ + Try to obtain a node from the filename based from the include paths. Will add + the node found to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` or the file name to + :py:attr:`waflib.Tools.c_preproc.c_parser.names` if no corresponding file is found. Called by + :py:attr:`waflib.Tools.c_preproc.c_parser.start`. + + :param filename: header to find + :type filename: string + :return: the node if found + :rtype: :py:class:`waflib.Node.Node` + """ + if filename.endswith('.moc'): + # we could let the qt4 module use a subclass, but then the function "scan" below must be duplicated + # in the qt4 and in the qt5 classes. So we have two lines here and it is sufficient. + self.names.append(filename) + return None + + self.curfile = filename + + found = None + if kind == '"': + if env.MSVC_VERSION: + for n in reversed(self.currentnode_stack): + found = self.cached_find_resource(n, filename) + if found: + break + else: + found = self.cached_find_resource(self.currentnode_stack[-1], filename) + + if not found: + for n in self.nodepaths: + found = self.cached_find_resource(n, filename) + if found: + break + + listed = self.listed + if found and not found in self.ban_includes: + if found not in listed: + listed.add(found) + self.nodes.append(found) + self.addlines(found) + else: + if filename not in listed: + listed.add(filename) + self.names.append(filename) + return found + + def filter_comments(self, node): + """ + Filter the comments from a c/h file, and return the preprocessor lines. + The regexps :py:attr:`waflib.Tools.c_preproc.re_cpp`, :py:attr:`waflib.Tools.c_preproc.re_nl` and :py:attr:`waflib.Tools.c_preproc.re_lines` are used internally. 
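+
+		For a file containing only ``#ifdef FOO``, ``#include "a.h"`` and
+		``#endif``, the expected result is
+		``[('ifdef', 'FOO'), ('include', '"a.h"'), ('endif', '')]``.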
+ + :return: the preprocessor directives as a list of (keyword, line) + :rtype: a list of string pairs + """ + # return a list of tuples : keyword, line + code = node.read() + if use_trigraphs: + for (a, b) in trig_def: + code = code.split(a).join(b) + code = re_nl.sub('', code) + code = re_cpp.sub(repl, code) + return re_lines.findall(code) + + def parse_lines(self, node): + try: + cache = node.ctx.preproc_cache_lines + except AttributeError: + cache = node.ctx.preproc_cache_lines = Utils.lru_cache(LINE_CACHE_SIZE) + try: + return cache[node] + except KeyError: + cache[node] = lines = self.filter_comments(node) + lines.append((POPFILE, '')) + lines.reverse() + return lines + + def addlines(self, node): + """ + Add the lines from a header in the list of preprocessor lines to parse + + :param node: header + :type node: :py:class:`waflib.Node.Node` + """ + + self.currentnode_stack.append(node.parent) + + self.count_files += 1 + if self.count_files > recursion_limit: + # issue #812 + raise PreprocError('recursion limit exceeded') + + if Logs.verbose: + Logs.debug('preproc: reading file %r', node) + try: + lines = self.parse_lines(node) + except EnvironmentError: + raise PreprocError('could not read the file %r' % node) + except Exception: + if Logs.verbose > 0: + Logs.error('parsing %r failed %s', node, traceback.format_exc()) + else: + self.lines.extend(lines) + + def start(self, node, env): + """ + Preprocess a source file to obtain the dependencies, which are accumulated to :py:attr:`waflib.Tools.c_preproc.c_parser.nodes` + and :py:attr:`waflib.Tools.c_preproc.c_parser.names`. + + :param node: source file + :type node: :py:class:`waflib.Node.Node` + :param env: config set containing additional defines to take into account + :type env: :py:class:`waflib.ConfigSet.ConfigSet` + """ + Logs.debug('preproc: scanning %s (in %s)', node.name, node.parent.name) + + self.current_file = node + self.addlines(node) + + # macros may be defined on the command-line, so they must be parsed as if they were part of the file + if env.DEFINES: + lst = format_defines(env.DEFINES) + lst.reverse() + self.lines.extend([('define', x) for x in lst]) + + while self.lines: + (token, line) = self.lines.pop() + if token == POPFILE: + self.count_files -= 1 + self.currentnode_stack.pop() + continue + + try: + state = self.state + + # make certain we define the state if we are about to enter in an if block + if token[:2] == 'if': + state.append(undefined) + elif token == 'endif': + state.pop() + + # skip lines when in a dead 'if' branch, wait for the endif + if token[0] != 'e': + if skipped in self.state or ignored in self.state: + continue + + if token == 'if': + ret = eval_macro(tokenize(line), self.defs) + if ret: + state[-1] = accepted + else: + state[-1] = ignored + elif token == 'ifdef': + m = re_mac.match(line) + if m and m.group() in self.defs: + state[-1] = accepted + else: + state[-1] = ignored + elif token == 'ifndef': + m = re_mac.match(line) + if m and m.group() in self.defs: + state[-1] = ignored + else: + state[-1] = accepted + elif token == 'include' or token == 'import': + (kind, inc) = extract_include(line, self.defs) + self.current_file = self.tryfind(inc, kind, env) + if token == 'import': + self.ban_includes.add(self.current_file) + elif token == 'elif': + if state[-1] == accepted: + state[-1] = skipped + elif state[-1] == ignored: + if eval_macro(tokenize(line), self.defs): + state[-1] = accepted + elif token == 'else': + if state[-1] == accepted: + state[-1] = skipped + elif state[-1] == ignored: + 
state[-1] = accepted + elif token == 'define': + try: + self.defs[self.define_name(line)] = line + except AttributeError: + raise PreprocError('Invalid define line %r' % line) + elif token == 'undef': + m = re_mac.match(line) + if m and m.group() in self.defs: + self.defs.__delitem__(m.group()) + #print "undef %s" % name + elif token == 'pragma': + if re_pragma_once.match(line.lower()): + self.ban_includes.add(self.current_file) + except Exception as e: + if Logs.verbose: + Logs.debug('preproc: line parsing failed (%s): %s %s', e, line, traceback.format_exc()) + + def define_name(self, line): + """ + :param line: define line + :type line: string + :rtype: string + :return: the define name + """ + return re_mac.match(line).group() + +def scan(task): + """ + Get the dependencies using a c/c++ preprocessor, this is required for finding dependencies of the kind:: + + #include some_macro() + + This function is bound as a task method on :py:class:`waflib.Tools.c.c` and :py:class:`waflib.Tools.cxx.cxx` for example + """ + try: + incn = task.generator.includes_nodes + except AttributeError: + raise Errors.WafError('%r is missing a feature such as "c", "cxx" or "includes": ' % task.generator) + + if go_absolute: + nodepaths = incn + [task.generator.bld.root.find_dir(x) for x in standard_includes] + else: + nodepaths = [x for x in incn if x.is_child_of(x.ctx.srcnode) or x.is_child_of(x.ctx.bldnode)] + + tmp = c_parser(nodepaths) + tmp.start(task.inputs[0], task.env) + return (tmp.nodes, tmp.names) diff --git a/third_party/waf/waflib/Tools/c_tests.py b/third_party/waf/waflib/Tools/c_tests.py new file mode 100644 index 0000000..bdd186c --- /dev/null +++ b/third_party/waf/waflib/Tools/c_tests.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2016-2018 (ita) + +""" +Various configuration tests. +""" + +from waflib import Task +from waflib.Configure import conf +from waflib.TaskGen import feature, before_method, after_method + +LIB_CODE = ''' +#ifdef _MSC_VER +#define testEXPORT __declspec(dllexport) +#else +#define testEXPORT +#endif +testEXPORT int lib_func(void) { return 9; } +''' + +MAIN_CODE = ''' +#ifdef _MSC_VER +#define testEXPORT __declspec(dllimport) +#else +#define testEXPORT +#endif +testEXPORT int lib_func(void); +int main(int argc, char **argv) { + (void)argc; (void)argv; + return !(lib_func() == 9); +} +''' + +@feature('link_lib_test') +@before_method('process_source') +def link_lib_test_fun(self): + """ + The configuration test :py:func:`waflib.Configure.run_build` declares a unique task generator, + so we need to create other task generators from here to check if the linker is able to link libraries. + """ + def write_test_file(task): + task.outputs[0].write(task.generator.code) + + rpath = [] + if getattr(self, 'add_rpath', False): + rpath = [self.bld.path.get_bld().abspath()] + + mode = self.mode + m = '%s %s' % (mode, mode) + ex = self.test_exec and 'test_exec' or '' + bld = self.bld + bld(rule=write_test_file, target='test.' + mode, code=LIB_CODE) + bld(rule=write_test_file, target='main.' + mode, code=MAIN_CODE) + bld(features='%sshlib' % m, source='test.' + mode, target='test') + bld(features='%sprogram %s' % (m, ex), source='main.' + mode, target='app', use='test', rpath=rpath) + +@conf +def check_library(self, mode=None, test_exec=True): + """ + Checks if libraries can be linked with the current linker. Uses :py:func:`waflib.Tools.c_tests.link_lib_test_fun`. 
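+
+	Minimal use::
+
+		def configure(conf):
+			conf.check_library(test_exec=False)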
+ + :param mode: c or cxx or d + :type mode: string + """ + if not mode: + mode = 'c' + if self.env.CXX: + mode = 'cxx' + self.check( + compile_filename = [], + features = 'link_lib_test', + msg = 'Checking for libraries', + mode = mode, + test_exec = test_exec) + +######################################################################################## + +INLINE_CODE = ''' +typedef int foo_t; +static %s foo_t static_foo () {return 0; } +%s foo_t foo () { + return 0; +} +''' +INLINE_VALUES = ['inline', '__inline__', '__inline'] + +@conf +def check_inline(self, **kw): + """ + Checks for the right value for inline macro. + Define INLINE_MACRO to 1 if the define is found. + If the inline macro is not 'inline', add a define to the ``config.h`` (#define inline __inline__) + + :param define_name: define INLINE_MACRO by default to 1 if the macro is defined + :type define_name: string + :param features: by default *c* or *cxx* depending on the compiler present + :type features: list of string + """ + self.start_msg('Checking for inline') + + if not 'define_name' in kw: + kw['define_name'] = 'INLINE_MACRO' + if not 'features' in kw: + if self.env.CXX: + kw['features'] = ['cxx'] + else: + kw['features'] = ['c'] + + for x in INLINE_VALUES: + kw['fragment'] = INLINE_CODE % (x, x) + + try: + self.check(**kw) + except self.errors.ConfigurationError: + continue + else: + self.end_msg(x) + if x != 'inline': + self.define('inline', x, quote=False) + return x + self.fatal('could not use inline functions') + +######################################################################################## + +LARGE_FRAGMENT = '''#include <unistd.h> +int main(int argc, char **argv) { + (void)argc; (void)argv; + return !(sizeof(off_t) >= 8); +} +''' + +@conf +def check_large_file(self, **kw): + """ + Checks for large file support and define the macro HAVE_LARGEFILE + The test is skipped on win32 systems (DEST_BINFMT == pe). 
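+
+	For example::
+
+		def configure(conf):
+			conf.check_large_file(mandatory=False)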
+ + :param define_name: define to set, by default *HAVE_LARGEFILE* + :type define_name: string + :param execute: execute the test (yes by default) + :type execute: bool + """ + if not 'define_name' in kw: + kw['define_name'] = 'HAVE_LARGEFILE' + if not 'execute' in kw: + kw['execute'] = True + + if not 'features' in kw: + if self.env.CXX: + kw['features'] = ['cxx', 'cxxprogram'] + else: + kw['features'] = ['c', 'cprogram'] + + kw['fragment'] = LARGE_FRAGMENT + + kw['msg'] = 'Checking for large file support' + ret = True + try: + if self.env.DEST_BINFMT != 'pe': + ret = self.check(**kw) + except self.errors.ConfigurationError: + pass + else: + if ret: + return True + + kw['msg'] = 'Checking for -D_FILE_OFFSET_BITS=64' + kw['defines'] = ['_FILE_OFFSET_BITS=64'] + try: + ret = self.check(**kw) + except self.errors.ConfigurationError: + pass + else: + self.define('_FILE_OFFSET_BITS', 64) + return ret + + self.fatal('There is no support for large files') + +######################################################################################## + +ENDIAN_FRAGMENT = ''' +#ifdef _MSC_VER +#define testshlib_EXPORT __declspec(dllexport) +#else +#define testshlib_EXPORT +#endif + +short int ascii_mm[] = { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; +short int ascii_ii[] = { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; +int testshlib_EXPORT use_ascii (int i) { + return ascii_mm[i] + ascii_ii[i]; +} +short int ebcdic_ii[] = { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; +short int ebcdic_mm[] = { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; +int use_ebcdic (int i) { + return ebcdic_mm[i] + ebcdic_ii[i]; +} +extern int foo; +''' + +class grep_for_endianness(Task.Task): + """ + Task that reads a binary and tries to determine the endianness + """ + color = 'PINK' + def run(self): + txt = self.inputs[0].read(flags='rb').decode('latin-1') + if txt.find('LiTTleEnDian') > -1: + self.generator.tmp.append('little') + elif txt.find('BIGenDianSyS') > -1: + self.generator.tmp.append('big') + else: + return -1 + +@feature('grep_for_endianness') +@after_method('apply_link') +def grep_for_endianness_fun(self): + """ + Used by the endianness configuration test + """ + self.create_task('grep_for_endianness', self.link_task.outputs[0]) + +@conf +def check_endianness(self): + """ + Executes a configuration test to determine the endianness + """ + tmp = [] + def check_msg(self): + return tmp[0] + + self.check(fragment=ENDIAN_FRAGMENT, features='c cshlib grep_for_endianness', + msg='Checking for endianness', define='ENDIANNESS', tmp=tmp, + okmsg=check_msg, confcache=None) + return tmp[0] + diff --git a/third_party/waf/waflib/Tools/ccroot.py b/third_party/waf/waflib/Tools/ccroot.py new file mode 100644 index 0000000..76deff5 --- /dev/null +++ b/third_party/waf/waflib/Tools/ccroot.py @@ -0,0 +1,792 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Classes and methods shared by tools providing support for C-like language such +as C/C++/D/Assembly/Go (this support module is almost never used alone). 
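+
+A typical wscript relying on the features defined here (a sketch)::
+
+	def build(bld):
+		bld.shlib(source='a.c', target='foo')
+		bld.program(source='main.c', target='app', use='foo')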
+""" + +import os, re +from waflib import Task, Utils, Node, Errors, Logs +from waflib.TaskGen import after_method, before_method, feature, taskgen_method, extension +from waflib.Tools import c_aliases, c_preproc, c_config, c_osx, c_tests +from waflib.Configure import conf + +SYSTEM_LIB_PATHS = ['/usr/lib64', '/usr/lib', '/usr/local/lib64', '/usr/local/lib'] + +USELIB_VARS = Utils.defaultdict(set) +""" +Mapping for features to :py:class:`waflib.ConfigSet.ConfigSet` variables. See :py:func:`waflib.Tools.ccroot.propagate_uselib_vars`. +""" + +USELIB_VARS['c'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CCDEPS', 'CFLAGS', 'ARCH']) +USELIB_VARS['cxx'] = set(['INCLUDES', 'FRAMEWORKPATH', 'DEFINES', 'CPPFLAGS', 'CXXDEPS', 'CXXFLAGS', 'ARCH']) +USELIB_VARS['d'] = set(['INCLUDES', 'DFLAGS']) +USELIB_VARS['includes'] = set(['INCLUDES', 'FRAMEWORKPATH', 'ARCH']) + +USELIB_VARS['cprogram'] = USELIB_VARS['cxxprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) +USELIB_VARS['cshlib'] = USELIB_VARS['cxxshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'FRAMEWORK', 'FRAMEWORKPATH', 'ARCH', 'LDFLAGS']) +USELIB_VARS['cstlib'] = USELIB_VARS['cxxstlib'] = set(['ARFLAGS', 'LINKDEPS']) + +USELIB_VARS['dprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) +USELIB_VARS['dshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS']) +USELIB_VARS['dstlib'] = set(['ARFLAGS', 'LINKDEPS']) + +USELIB_VARS['asm'] = set(['ASFLAGS']) + +# ================================================================================================= + +@taskgen_method +def create_compiled_task(self, name, node): + """ + Create the compilation task: c, cxx, asm, etc. The output node is created automatically (object file with a typical **.o** extension). + The task is appended to the list *compiled_tasks* which is then used by :py:func:`waflib.Tools.ccroot.apply_link` + + :param name: name of the task class + :type name: string + :param node: the file to compile + :type node: :py:class:`waflib.Node.Node` + :return: The task created + :rtype: :py:class:`waflib.Task.Task` + """ + out = '%s.%d.o' % (node.name, self.idx) + task = self.create_task(name, node, node.parent.find_or_declare(out)) + try: + self.compiled_tasks.append(task) + except AttributeError: + self.compiled_tasks = [task] + return task + +@taskgen_method +def to_incnodes(self, inlst): + """ + Task generator method provided to convert a list of string/nodes into a list of includes folders. + + The paths are assumed to be relative to the task generator path, except if they begin by **#** + in which case they are searched from the top-level directory (``bld.srcnode``). + The folders are simply assumed to be existing. + + The node objects in the list are returned in the output list. The strings are converted + into node objects if possible. The node is searched from the source directory, and if a match is found, + the equivalent build directory is created and added to the returned list too. When a folder cannot be found, it is ignored. 
+ + :param inlst: list of folders + :type inlst: space-delimited string or a list of string/nodes + :rtype: list of :py:class:`waflib.Node.Node` + :return: list of include folders as nodes + """ + lst = [] + seen = set() + for x in self.to_list(inlst): + if x in seen or not x: + continue + seen.add(x) + + # with a real lot of targets, it is sometimes interesting to cache the results below + if isinstance(x, Node.Node): + lst.append(x) + else: + if os.path.isabs(x): + lst.append(self.bld.root.make_node(x) or x) + else: + if x[0] == '#': + p = self.bld.bldnode.make_node(x[1:]) + v = self.bld.srcnode.make_node(x[1:]) + else: + p = self.path.get_bld().make_node(x) + v = self.path.make_node(x) + if p.is_child_of(self.bld.bldnode): + p.mkdir() + lst.append(p) + lst.append(v) + return lst + +@feature('c', 'cxx', 'd', 'asm', 'fc', 'includes') +@after_method('propagate_uselib_vars', 'process_source') +def apply_incpaths(self): + """ + Task generator method that processes the attribute *includes*:: + + tg = bld(features='includes', includes='.') + + The folders only need to be relative to the current directory, the equivalent build directory is + added automatically (for headers created in the build directory). This enables using a build directory + or not (``top == out``). + + This method will add a list of nodes read by :py:func:`waflib.Tools.ccroot.to_incnodes` in ``tg.env.INCPATHS``, + and the list of include paths in ``tg.env.INCLUDES``. + """ + + lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env.INCLUDES) + self.includes_nodes = lst + cwd = self.get_cwd() + self.env.INCPATHS = [x.path_from(cwd) for x in lst] + +class link_task(Task.Task): + """ + Base class for all link tasks. A task generator is supposed to have at most one link task bound in the attribute *link_task*. See :py:func:`waflib.Tools.ccroot.apply_link`. + + .. inheritance-diagram:: waflib.Tools.ccroot.stlink_task waflib.Tools.c.cprogram waflib.Tools.c.cshlib waflib.Tools.cxx.cxxstlib waflib.Tools.cxx.cxxprogram waflib.Tools.cxx.cxxshlib waflib.Tools.d.dprogram waflib.Tools.d.dshlib waflib.Tools.d.dstlib waflib.Tools.ccroot.fake_shlib waflib.Tools.ccroot.fake_stlib waflib.Tools.asm.asmprogram waflib.Tools.asm.asmshlib waflib.Tools.asm.asmstlib + :top-classes: waflib.Tools.ccroot.link_task + """ + color = 'YELLOW' + + weight = 3 + """Try to process link tasks as early as possible""" + + inst_to = None + """Default installation path for the link task outputs, or None to disable""" + + chmod = Utils.O755 + """Default installation mode for the link task outputs""" + + def add_target(self, target): + """ + Process the *target* attribute to add the platform-specific prefix/suffix such as *.so* or *.exe*. + The settings are retrieved from ``env.clsname_PATTERN`` + """ + if isinstance(target, str): + base = self.generator.path + if target.startswith('#'): + # for those who like flat structures + target = target[1:] + base = self.generator.bld.bldnode + + pattern = self.env[self.__class__.__name__ + '_PATTERN'] + if not pattern: + pattern = '%s' + folder, name = os.path.split(target) + + if self.__class__.__name__.find('shlib') > 0 and getattr(self.generator, 'vnum', None): + nums = self.generator.vnum.split('.') + if self.env.DEST_BINFMT == 'pe': + # include the version in the dll file name, + # the import lib file name stays unversioned. 
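+					# e.g. with a hypothetical vnum='1.2.3', a target 'foo' becomes 'foo-1'
+					# before the pattern is applied (illustrative)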
+					name = name + '-' + nums[0]
+				elif self.env.DEST_OS == 'openbsd':
+					pattern = '%s.%s' % (pattern, nums[0])
+					if len(nums) >= 2:
+						pattern += '.%s' % nums[1]
+
+			if folder:
+				tmp = folder + os.sep + pattern % name
+			else:
+				tmp = pattern % name
+			target = base.find_or_declare(tmp)
+			self.set_outputs(target)
+
+	def exec_command(self, *k, **kw):
+		ret = super(link_task, self).exec_command(*k, **kw)
+		if not ret and self.env.DO_MANIFEST:
+			ret = self.exec_mf()
+		return ret
+
+	def exec_mf(self):
+		"""
+		Create manifest files for VS-like compilers (msvc, ifort, ...)
+		"""
+		if not self.env.MT:
+			return 0
+
+		manifest = None
+		for out_node in self.outputs:
+			if out_node.name.endswith('.manifest'):
+				manifest = out_node.abspath()
+				break
+		else:
+			# Should never get here. If we do, it means the manifest file was
+			# never added to the outputs list, thus we don't have a manifest file
+			# to embed, so we just return.
+			return 0
+
+		# embedding mode. Different for EXEs and DLLs.
+		# see: http://msdn2.microsoft.com/en-us/library/ms235591(VS.80).aspx
+		mode = ''
+		for x in Utils.to_list(self.generator.features):
+			if x in ('cprogram', 'cxxprogram', 'fcprogram', 'fcprogram_test'):
+				mode = 1
+			elif x in ('cshlib', 'cxxshlib', 'fcshlib'):
+				mode = 2
+
+		Logs.debug('msvc: embedding manifest in mode %r', mode)
+
+		lst = [] + self.env.MT
+		lst.extend(Utils.to_list(self.env.MTFLAGS))
+		lst.extend(['-manifest', manifest])
+		lst.append('-outputresource:%s;%s' % (self.outputs[0].abspath(), mode))
+
+		return super(link_task, self).exec_command(lst)
+
+class stlink_task(link_task):
+	"""
+	Base for static link tasks, which use *ar* most of the time.
+	The target is always removed before being written.
+	"""
+	run_str = '${AR} ${ARFLAGS} ${AR_TGT_F}${TGT} ${AR_SRC_F}${SRC}'
+
+	chmod = Utils.O644
+	"""Default installation mode for the static libraries"""
+
+def rm_tgt(cls):
+	old = cls.run
+	def wrap(self):
+		try:
+			os.remove(self.outputs[0].abspath())
+		except OSError:
+			pass
+		return old(self)
+	setattr(cls, 'run', wrap)
+rm_tgt(stlink_task)
+
+@feature('skip_stlib_link_deps')
+@before_method('process_use')
+def apply_skip_stlib_link_deps(self):
+	"""
+	This enables an optimization in the :py:func:`waflib.Tools.ccroot.process_use` method that skips dependency and
+	link flag propagation for targets that generate static libraries (via the :py:class:`waflib.Tools.ccroot.stlink_task` task).
+	The actual behavior is implemented in the :py:func:`waflib.Tools.ccroot.process_use` method, so this feature only tells waf
+	to enable the new behavior.
+	"""
+	self.env.SKIP_STLIB_LINK_DEPS = True
+
+@feature('c', 'cxx', 'd', 'fc', 'asm')
+@after_method('process_source')
+def apply_link(self):
+	"""
+	Collect the tasks stored in ``compiled_tasks`` (created by :py:func:`waflib.Tools.ccroot.create_compiled_task`), and
+	use the outputs for a new instance of :py:class:`waflib.Tools.ccroot.link_task`.
The class to use is the first link task + matching a name from the attribute *features*, for example:: + + def build(bld): + tg = bld(features='cxx cxxprogram cprogram', source='main.c', target='app') + + will create the task ``tg.link_task`` as a new instance of :py:class:`waflib.Tools.cxx.cxxprogram` + """ + + for x in self.features: + if x == 'cprogram' and 'cxx' in self.features: # limited compat + x = 'cxxprogram' + elif x == 'cshlib' and 'cxx' in self.features: + x = 'cxxshlib' + + if x in Task.classes: + if issubclass(Task.classes[x], link_task): + link = x + break + else: + return + + objs = [t.outputs[0] for t in getattr(self, 'compiled_tasks', [])] + self.link_task = self.create_task(link, objs) + self.link_task.add_target(self.target) + + # remember that the install paths are given by the task generators + try: + inst_to = self.install_path + except AttributeError: + inst_to = self.link_task.inst_to + if inst_to: + # install a copy of the node list we have at this moment (implib not added) + self.install_task = self.add_install_files( + install_to=inst_to, install_from=self.link_task.outputs[:], + chmod=self.link_task.chmod, task=self.link_task) + +@taskgen_method +def use_rec(self, name, **kw): + """ + Processes the ``use`` keyword recursively. This method is kind of private and only meant to be used from ``process_use`` + """ + + if name in self.tmp_use_not or name in self.tmp_use_seen: + return + + try: + y = self.bld.get_tgen_by_name(name) + except Errors.WafError: + self.uselib.append(name) + self.tmp_use_not.add(name) + return + + self.tmp_use_seen.append(name) + y.post() + + # bind temporary attributes on the task generator + y.tmp_use_objects = objects = kw.get('objects', True) + y.tmp_use_stlib = stlib = kw.get('stlib', True) + try: + link_task = y.link_task + except AttributeError: + y.tmp_use_var = '' + else: + objects = False + if not isinstance(link_task, stlink_task): + stlib = False + y.tmp_use_var = 'LIB' + else: + y.tmp_use_var = 'STLIB' + + p = self.tmp_use_prec + for x in self.to_list(getattr(y, 'use', [])): + if self.env["STLIB_" + x]: + continue + try: + p[x].append(name) + except KeyError: + p[x] = [name] + self.use_rec(x, objects=objects, stlib=stlib) + +@feature('c', 'cxx', 'd', 'use', 'fc') +@before_method('apply_incpaths', 'propagate_uselib_vars') +@after_method('apply_link', 'process_source') +def process_use(self): + """ + Process the ``use`` attribute which contains a list of task generator names:: + + def build(bld): + bld.shlib(source='a.c', target='lib1') + bld.program(source='main.c', target='app', use='lib1') + + See :py:func:`waflib.Tools.ccroot.use_rec`. 
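+
+	The lookup is recursive, so transitive dependencies are linked in as well;
+	a minimal sketch (target names are illustrative)::
+
+		def build(bld):
+			bld.stlib(source='util.c', target='util')
+			bld.shlib(source='a.c', target='lib1', use='util')
+			bld.program(source='main.c', target='app', use='lib1')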
+ """ + + use_not = self.tmp_use_not = set() + self.tmp_use_seen = [] # we would like an ordered set + use_prec = self.tmp_use_prec = {} + self.uselib = self.to_list(getattr(self, 'uselib', [])) + self.includes = self.to_list(getattr(self, 'includes', [])) + names = self.to_list(getattr(self, 'use', [])) + + for x in names: + self.use_rec(x) + + for x in use_not: + if x in use_prec: + del use_prec[x] + + # topological sort + out = self.tmp_use_sorted = [] + tmp = [] + for x in self.tmp_use_seen: + for k in use_prec.values(): + if x in k: + break + else: + tmp.append(x) + + while tmp: + e = tmp.pop() + out.append(e) + try: + nlst = use_prec[e] + except KeyError: + pass + else: + del use_prec[e] + for x in nlst: + for y in use_prec: + if x in use_prec[y]: + break + else: + tmp.append(x) + if use_prec: + raise Errors.WafError('Cycle detected in the use processing %r' % use_prec) + out.reverse() + + link_task = getattr(self, 'link_task', None) + for x in out: + y = self.bld.get_tgen_by_name(x) + var = y.tmp_use_var + if var and link_task: + if self.env.SKIP_STLIB_LINK_DEPS and isinstance(link_task, stlink_task): + # If the skip_stlib_link_deps feature is enabled then we should + # avoid adding lib deps to the stlink_task instance. + pass + elif var == 'LIB' or y.tmp_use_stlib or x in names: + self.env.append_value(var, [y.target[y.target.rfind(os.sep) + 1:]]) + self.link_task.dep_nodes.extend(y.link_task.outputs) + tmp_path = y.link_task.outputs[0].parent.path_from(self.get_cwd()) + self.env.append_unique(var + 'PATH', [tmp_path]) + else: + if y.tmp_use_objects: + self.add_objects_from_tgen(y) + + if getattr(y, 'export_includes', None): + # self.includes may come from a global variable #2035 + self.includes = self.includes + y.to_incnodes(y.export_includes) + + if getattr(y, 'export_defines', None): + self.env.append_value('DEFINES', self.to_list(y.export_defines)) + + + # and finally, add the use variables (no recursion needed) + for x in names: + try: + y = self.bld.get_tgen_by_name(x) + except Errors.WafError: + if not self.env['STLIB_' + x] and not x in self.uselib: + self.uselib.append(x) + else: + for k in self.to_list(getattr(y, 'use', [])): + if not self.env['STLIB_' + k] and not k in self.uselib: + self.uselib.append(k) + +@taskgen_method +def accept_node_to_link(self, node): + """ + PRIVATE INTERNAL USE ONLY + """ + return not node.name.endswith('.pdb') + +@taskgen_method +def add_objects_from_tgen(self, tg): + """ + Add the objects from the depending compiled tasks as link task inputs. + + Some objects are filtered: for instance, .pdb files are added + to the compiled tasks but not to the link tasks (to avoid errors) + PRIVATE INTERNAL USE ONLY + """ + try: + link_task = self.link_task + except AttributeError: + pass + else: + for tsk in getattr(tg, 'compiled_tasks', []): + for x in tsk.outputs: + if self.accept_node_to_link(x): + link_task.inputs.append(x) + +@taskgen_method +def get_uselib_vars(self): + """ + :return: the *uselib* variables associated to the *features* attribute (see :py:attr:`waflib.Tools.ccroot.USELIB_VARS`) + :rtype: list of string + """ + _vars = set() + for x in self.features: + if x in USELIB_VARS: + _vars |= USELIB_VARS[x] + return _vars + +@feature('c', 'cxx', 'd', 'fc', 'javac', 'cs', 'uselib', 'asm') +@after_method('process_use') +def propagate_uselib_vars(self): + """ + Process uselib variables for adding flags. 
For example, the following target:: + + def build(bld): + bld.env.AFLAGS_aaa = ['bar'] + from waflib.Tools.ccroot import USELIB_VARS + USELIB_VARS['aaa'] = ['AFLAGS'] + + tg = bld(features='aaa', aflags='test') + + The *aflags* attribute will be processed and this method will set:: + + tg.env.AFLAGS = ['bar', 'test'] + """ + _vars = self.get_uselib_vars() + env = self.env + app = env.append_value + feature_uselib = self.features + self.to_list(getattr(self, 'uselib', [])) + for var in _vars: + y = var.lower() + val = getattr(self, y, []) + if val: + app(var, self.to_list(val)) + + for x in feature_uselib: + val = env['%s_%s' % (var, x)] + if val: + app(var, val) + +# ============ the code above must not know anything about import libs ========== + +@feature('cshlib', 'cxxshlib', 'fcshlib') +@after_method('apply_link') +def apply_implib(self): + """ + Handle dlls and their import libs on Windows-like systems. + + A ``.dll.a`` file called *import library* is generated. + It must be installed as it is required for linking the library. + """ + if not self.env.DEST_BINFMT == 'pe': + return + + dll = self.link_task.outputs[0] + if isinstance(self.target, Node.Node): + name = self.target.name + else: + name = os.path.split(self.target)[1] + implib = self.env.implib_PATTERN % name + implib = dll.parent.find_or_declare(implib) + self.env.append_value('LINKFLAGS', self.env.IMPLIB_ST % implib.bldpath()) + self.link_task.outputs.append(implib) + + if getattr(self, 'defs', None) and self.env.DEST_BINFMT == 'pe': + node = self.path.find_resource(self.defs) + if not node: + raise Errors.WafError('invalid def file %r' % self.defs) + if self.env.def_PATTERN: + self.env.append_value('LINKFLAGS', self.env.def_PATTERN % node.path_from(self.get_cwd())) + self.link_task.dep_nodes.append(node) + else: + # gcc for windows takes *.def file as input without any special flag + self.link_task.inputs.append(node) + + # where to put the import library + if getattr(self, 'install_task', None): + try: + # user has given a specific installation path for the import library + inst_to = self.install_path_implib + except AttributeError: + try: + # user has given an installation path for the main library, put the import library in it + inst_to = self.install_path + except AttributeError: + # else, put the library in BINDIR and the import library in LIBDIR + inst_to = '${IMPLIBDIR}' + self.install_task.install_to = '${BINDIR}' + if not self.env.IMPLIBDIR: + self.env.IMPLIBDIR = self.env.LIBDIR + self.implib_install_task = self.add_install_files(install_to=inst_to, install_from=implib, + chmod=self.link_task.chmod, task=self.link_task) + +# ============ the code above must not know anything about vnum processing on unix platforms ========= + +re_vnum = re.compile('^([1-9]\\d*|0)([.]([1-9]\\d*|0)){0,2}?$') +@feature('cshlib', 'cxxshlib', 'dshlib', 'fcshlib', 'vnum') +@after_method('apply_link', 'propagate_uselib_vars') +def apply_vnum(self): + """ + Enforce version numbering on shared libraries. 
The valid version numbers must have either zero or two dots::
+
+		def build(bld):
+			bld.shlib(source='a.c', target='foo', vnum='14.15.16')
+
+	In this example, on the Linux platform, ``libfoo.so`` is installed as ``libfoo.so.14.15.16``, and the following symbolic links are created:
+
+	* ``libfoo.so → libfoo.so.14.15.16``
+	* ``libfoo.so.14 → libfoo.so.14.15.16``
+
+	By default, the library will be assigned SONAME ``libfoo.so.14``, effectively declaring ABI compatibility between all minor and patch releases for the major version of the library. When necessary, the compatibility can be explicitly defined using the ``cnum`` parameter::
+
+		def build(bld):
+			bld.shlib(source='a.c', target='foo', vnum='14.15.16', cnum='14.15')
+
+	In this case, the assigned SONAME will be ``libfoo.so.14.15``, with ABI compatibility only between patch releases for a specific major and minor version of the library.
+
+	On the OS X platform, the install-name parameter follows the above logic for the SONAME, except that it also specifies an absolute path (based on ``install_path``) for the library.
+	"""
+	if not getattr(self, 'vnum', '') or os.name != 'posix' or self.env.DEST_BINFMT not in ('elf', 'mac-o'):
+		return
+
+	link = self.link_task
+	if not re_vnum.match(self.vnum):
+		raise Errors.WafError('Invalid vnum %r for target %r' % (self.vnum, getattr(self, 'name', self)))
+	nums = self.vnum.split('.')
+	node = link.outputs[0]
+
+	cnum = getattr(self, 'cnum', str(nums[0]))
+	cnums = cnum.split('.')
+	if len(cnums)>len(nums) or nums[0:len(cnums)] != cnums:
+		raise Errors.WafError('invalid compatibility version %s' % cnum)
+
+	libname = node.name
+	if libname.endswith('.dylib'):
+		name3 = libname.replace('.dylib', '.%s.dylib' % self.vnum)
+		name2 = libname.replace('.dylib', '.%s.dylib' % cnum)
+	else:
+		name3 = libname + '.' + self.vnum
+		name2 = libname + '.'
+ cnum + + # add the so name for the ld linker - to disable, just unset env.SONAME_ST + if self.env.SONAME_ST: + v = self.env.SONAME_ST % name2 + self.env.append_value('LINKFLAGS', v.split()) + + # the following task is just to enable execution from the build dir :-/ + if self.env.DEST_OS != 'openbsd': + outs = [node.parent.make_node(name3)] + if name2 != name3: + outs.append(node.parent.make_node(name2)) + self.create_task('vnum', node, outs) + + if getattr(self, 'install_task', None): + self.install_task.hasrun = Task.SKIPPED + self.install_task.no_errcheck_out = True + path = self.install_task.install_to + if self.env.DEST_OS == 'openbsd': + libname = self.link_task.outputs[0].name + t1 = self.add_install_as(install_to='%s/%s' % (path, libname), install_from=node, chmod=self.link_task.chmod) + self.vnum_install_task = (t1,) + else: + t1 = self.add_install_as(install_to=path + os.sep + name3, install_from=node, chmod=self.link_task.chmod) + t3 = self.add_symlink_as(install_to=path + os.sep + libname, install_from=name3) + if name2 != name3: + t2 = self.add_symlink_as(install_to=path + os.sep + name2, install_from=name3) + self.vnum_install_task = (t1, t2, t3) + else: + self.vnum_install_task = (t1, t3) + + if '-dynamiclib' in self.env.LINKFLAGS: + # this requires after(propagate_uselib_vars) + try: + inst_to = self.install_path + except AttributeError: + inst_to = self.link_task.inst_to + if inst_to: + p = Utils.subst_vars(inst_to, self.env) + path = os.path.join(p, name2) + self.env.append_value('LINKFLAGS', ['-install_name', path]) + self.env.append_value('LINKFLAGS', '-Wl,-compatibility_version,%s' % cnum) + self.env.append_value('LINKFLAGS', '-Wl,-current_version,%s' % self.vnum) + +class vnum(Task.Task): + """ + Create the symbolic links for a versioned shared library. Instances are created by :py:func:`waflib.Tools.ccroot.apply_vnum` + """ + color = 'CYAN' + ext_in = ['.bin'] + def keyword(self): + return 'Symlinking' + def run(self): + for x in self.outputs: + path = x.abspath() + try: + os.remove(path) + except OSError: + pass + + try: + os.symlink(self.inputs[0].name, path) + except OSError: + return 1 + +class fake_shlib(link_task): + """ + Task used for reading a system library and adding the dependency on it + """ + def runnable_status(self): + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + return Task.SKIP_ME + +class fake_stlib(stlink_task): + """ + Task used for reading a system library and adding the dependency on it + """ + def runnable_status(self): + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + return Task.SKIP_ME + +@conf +def read_shlib(self, name, paths=[], export_includes=[], export_defines=[]): + """ + Read a system shared library, enabling its use as a local library. Will trigger a rebuild if the file changes:: + + def build(bld): + bld.read_shlib('m') + bld.program(source='main.c', use='m') + """ + return self(name=name, features='fake_lib', lib_paths=paths, lib_type='shlib', export_includes=export_includes, export_defines=export_defines) + +@conf +def read_stlib(self, name, paths=[], export_includes=[], export_defines=[]): + """ + Read a system static library, enabling a use as a local library. Will trigger a rebuild if the file changes. 
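+
+	A sketch mirroring the :py:func:`waflib.Tools.ccroot.read_shlib` example (library name and path are illustrative)::
+
+		def build(bld):
+			bld.read_stlib('jpeg', paths=['/opt/libjpeg/lib'])
+			bld.program(source='main.c', target='app', use='jpeg')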
+ """ + return self(name=name, features='fake_lib', lib_paths=paths, lib_type='stlib', export_includes=export_includes, export_defines=export_defines) + +lib_patterns = { + 'shlib' : ['lib%s.so', '%s.so', 'lib%s.dylib', 'lib%s.dll', '%s.dll'], + 'stlib' : ['lib%s.a', '%s.a', 'lib%s.dll', '%s.dll', 'lib%s.lib', '%s.lib'], +} + +@feature('fake_lib') +def process_lib(self): + """ + Find the location of a foreign library. Used by :py:class:`waflib.Tools.ccroot.read_shlib` and :py:class:`waflib.Tools.ccroot.read_stlib`. + """ + node = None + + names = [x % self.name for x in lib_patterns[self.lib_type]] + for x in self.lib_paths + [self.path] + SYSTEM_LIB_PATHS: + if not isinstance(x, Node.Node): + x = self.bld.root.find_node(x) or self.path.find_node(x) + if not x: + continue + + for y in names: + node = x.find_node(y) + if node: + try: + Utils.h_file(node.abspath()) + except EnvironmentError: + raise ValueError('Could not read %r' % y) + break + else: + continue + break + else: + raise Errors.WafError('could not find library %r' % self.name) + self.link_task = self.create_task('fake_%s' % self.lib_type, [], [node]) + self.target = self.name + + +class fake_o(Task.Task): + def runnable_status(self): + return Task.SKIP_ME + +@extension('.o', '.obj') +def add_those_o_files(self, node): + tsk = self.create_task('fake_o', [], node) + try: + self.compiled_tasks.append(tsk) + except AttributeError: + self.compiled_tasks = [tsk] + +@feature('fake_obj') +@before_method('process_source') +def process_objs(self): + """ + Puts object files in the task generator outputs + """ + for node in self.to_nodes(self.source): + self.add_those_o_files(node) + self.source = [] + +@conf +def read_object(self, obj): + """ + Read an object file, enabling injection in libs/programs. Will trigger a rebuild if the file changes. 
+ + :param obj: object file path, as string or Node + """ + if not isinstance(obj, self.path.__class__): + obj = self.path.find_resource(obj) + return self(features='fake_obj', source=obj, name=obj.name) + +@feature('cxxprogram', 'cprogram') +@after_method('apply_link', 'process_use') +def set_full_paths_hpux(self): + """ + On hp-ux, extend the libpaths and static library paths to absolute paths + """ + if self.env.DEST_OS != 'hp-ux': + return + base = self.bld.bldnode.abspath() + for var in ['LIBPATH', 'STLIBPATH']: + lst = [] + for x in self.env[var]: + if x.startswith('/'): + lst.append(x) + else: + lst.append(os.path.normpath(os.path.join(base, x))) + self.env[var] = lst + diff --git a/third_party/waf/waflib/Tools/clang.py b/third_party/waf/waflib/Tools/clang.py new file mode 100644 index 0000000..3828e39 --- /dev/null +++ b/third_party/waf/waflib/Tools/clang.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Krzysztof Kosiński 2014 + +""" +Detect the Clang C compiler +""" + +from waflib.Tools import ccroot, ar, gcc +from waflib.Configure import conf + +@conf +def find_clang(conf): + """ + Finds the program clang and executes it to ensure it really is clang + """ + cc = conf.find_program('clang', var='CC') + conf.get_cc_version(cc, clang=True) + conf.env.CC_NAME = 'clang' + +def configure(conf): + conf.find_clang() + conf.find_program(['llvm-ar', 'ar'], var='AR') + conf.find_ar() + conf.gcc_common_flags() + conf.gcc_modifier_platform() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() diff --git a/third_party/waf/waflib/Tools/clangxx.py b/third_party/waf/waflib/Tools/clangxx.py new file mode 100644 index 0000000..152013c --- /dev/null +++ b/third_party/waf/waflib/Tools/clangxx.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy 2009-2018 (ita) + +""" +Detect the Clang++ C++ compiler +""" + +from waflib.Tools import ccroot, ar, gxx +from waflib.Configure import conf + +@conf +def find_clangxx(conf): + """ + Finds the program clang++, and executes it to ensure it really is clang++ + """ + cxx = conf.find_program('clang++', var='CXX') + conf.get_cc_version(cxx, clang=True) + conf.env.CXX_NAME = 'clang' + +def configure(conf): + conf.find_clangxx() + conf.find_program(['llvm-ar', 'ar'], var='AR') + conf.find_ar() + conf.gxx_common_flags() + conf.gxx_modifier_platform() + conf.cxx_load_tools() + conf.cxx_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/Tools/compiler_c.py b/third_party/waf/waflib/Tools/compiler_c.py new file mode 100644 index 0000000..e033ce6 --- /dev/null +++ b/third_party/waf/waflib/Tools/compiler_c.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Matthias Jahn jahn dôt matthias ât freenet dôt de, 2007 (pmarat) + +""" +Try to detect a C compiler from the list of supported compilers (gcc, msvc, etc):: + + def options(opt): + opt.load('compiler_c') + def configure(cnf): + cnf.load('compiler_c') + def build(bld): + bld.program(source='main.c', target='app') + +The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_c.c_compiler`. To register +a new C compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: + + from waflib.Tools.compiler_c import c_compiler + c_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] + + def options(opt): + opt.load('compiler_c') + def configure(cnf): + cnf.load('compiler_c') + def build(bld): + bld.program(source='main.c', target='app') + +Not all compilers need to have a specific tool. 
For example, the clang compilers can be detected by the gcc tools when using:: + + $ CC=clang waf configure +""" + +import re +from waflib.Tools import ccroot +from waflib import Utils +from waflib.Logs import debug + +c_compiler = { +'win32': ['msvc', 'gcc', 'clang'], +'cygwin': ['gcc', 'clang'], +'darwin': ['clang', 'gcc'], +'aix': ['xlc', 'gcc', 'clang'], +'linux': ['gcc', 'clang', 'icc'], +'sunos': ['suncc', 'gcc'], +'irix': ['gcc', 'irixcc'], +'hpux': ['gcc'], +'osf1V': ['gcc'], +'gnu': ['gcc', 'clang'], +'java': ['gcc', 'msvc', 'clang', 'icc'], +'gnukfreebsd': ['gcc', 'clang'], +'default': ['clang', 'gcc'], +} +""" +Dict mapping platform names to Waf tools finding specific C compilers:: + + from waflib.Tools.compiler_c import c_compiler + c_compiler['linux'] = ['gcc', 'icc', 'suncc'] +""" + +def default_compilers(): + build_platform = Utils.unversioned_sys_platform() + possible_compiler_list = c_compiler.get(build_platform, c_compiler['default']) + return ' '.join(possible_compiler_list) + +def configure(conf): + """ + Detects a suitable C compiler + + :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found + """ + try: + test_for_compiler = conf.options.check_c_compiler or default_compilers() + except AttributeError: + conf.fatal("Add options(opt): opt.load('compiler_c')") + + for compiler in re.split('[ ,]+', test_for_compiler): + conf.env.stash() + conf.start_msg('Checking for %r (C compiler)' % compiler) + try: + conf.load(compiler) + except conf.errors.ConfigurationError as e: + conf.env.revert() + conf.end_msg(False) + debug('compiler_c: %r', e) + else: + if conf.env.CC: + conf.end_msg(conf.env.get_flat('CC')) + conf.env.COMPILER_CC = compiler + conf.env.commit() + break + conf.env.revert() + conf.end_msg(False) + else: + conf.fatal('could not configure a C compiler!') + +def options(opt): + """ + This is how to provide compiler preferences on the command-line:: + + $ waf configure --check-c-compiler=gcc + """ + test_for_compiler = default_compilers() + opt.load_special_tools('c_*.py', ban=['c_dumbpreproc.py']) + cc_compiler_opts = opt.add_option_group('Configuration options') + cc_compiler_opts.add_option('--check-c-compiler', default=None, + help='list of C compilers to try [%s]' % test_for_compiler, + dest="check_c_compiler") + + for x in test_for_compiler.split(): + opt.load('%s' % x) + diff --git a/third_party/waf/waflib/Tools/compiler_cxx.py b/third_party/waf/waflib/Tools/compiler_cxx.py new file mode 100644 index 0000000..42658c5 --- /dev/null +++ b/third_party/waf/waflib/Tools/compiler_cxx.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Matthias Jahn jahn dôt matthias ât freenet dôt de 2007 (pmarat) + +""" +Try to detect a C++ compiler from the list of supported compilers (g++, msvc, etc):: + + def options(opt): + opt.load('compiler_cxx') + def configure(cnf): + cnf.load('compiler_cxx') + def build(bld): + bld.program(source='main.cpp', target='app') + +The compilers are associated to platforms in :py:attr:`waflib.Tools.compiler_cxx.cxx_compiler`. To register +a new C++ compiler named *cfoo* (assuming the tool ``waflib/extras/cfoo.py`` exists), use:: + + from waflib.Tools.compiler_cxx import cxx_compiler + cxx_compiler['win32'] = ['cfoo', 'msvc', 'gcc'] + + def options(opt): + opt.load('compiler_cxx') + def configure(cnf): + cnf.load('compiler_cxx') + def build(bld): + bld.program(source='main.c', target='app') + +Not all compilers need to have a specific tool. 
For example, the clang compilers can be detected by the gcc tools when using:: + + $ CXX=clang waf configure +""" + + +import re +from waflib.Tools import ccroot +from waflib import Utils +from waflib.Logs import debug + +cxx_compiler = { +'win32': ['msvc', 'g++', 'clang++'], +'cygwin': ['g++', 'clang++'], +'darwin': ['clang++', 'g++'], +'aix': ['xlc++', 'g++', 'clang++'], +'linux': ['g++', 'clang++', 'icpc'], +'sunos': ['sunc++', 'g++'], +'irix': ['g++'], +'hpux': ['g++'], +'osf1V': ['g++'], +'gnu': ['g++', 'clang++'], +'java': ['g++', 'msvc', 'clang++', 'icpc'], +'gnukfreebsd': ['g++', 'clang++'], +'default': ['clang++', 'g++'] +} +""" +Dict mapping the platform names to Waf tools finding specific C++ compilers:: + + from waflib.Tools.compiler_cxx import cxx_compiler + cxx_compiler['linux'] = ['gxx', 'icpc', 'suncxx'] +""" + +def default_compilers(): + build_platform = Utils.unversioned_sys_platform() + possible_compiler_list = cxx_compiler.get(build_platform, cxx_compiler['default']) + return ' '.join(possible_compiler_list) + +def configure(conf): + """ + Detects a suitable C++ compiler + + :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found + """ + try: + test_for_compiler = conf.options.check_cxx_compiler or default_compilers() + except AttributeError: + conf.fatal("Add options(opt): opt.load('compiler_cxx')") + + for compiler in re.split('[ ,]+', test_for_compiler): + conf.env.stash() + conf.start_msg('Checking for %r (C++ compiler)' % compiler) + try: + conf.load(compiler) + except conf.errors.ConfigurationError as e: + conf.env.revert() + conf.end_msg(False) + debug('compiler_cxx: %r', e) + else: + if conf.env.CXX: + conf.end_msg(conf.env.get_flat('CXX')) + conf.env.COMPILER_CXX = compiler + conf.env.commit() + break + conf.env.revert() + conf.end_msg(False) + else: + conf.fatal('could not configure a C++ compiler!') + +def options(opt): + """ + This is how to provide compiler preferences on the command-line:: + + $ waf configure --check-cxx-compiler=gxx + """ + test_for_compiler = default_compilers() + opt.load_special_tools('cxx_*.py') + cxx_compiler_opts = opt.add_option_group('Configuration options') + cxx_compiler_opts.add_option('--check-cxx-compiler', default=None, + help='list of C++ compilers to try [%s]' % test_for_compiler, + dest="check_cxx_compiler") + + for x in test_for_compiler.split(): + opt.load('%s' % x) + diff --git a/third_party/waf/waflib/Tools/compiler_d.py b/third_party/waf/waflib/Tools/compiler_d.py new file mode 100644 index 0000000..43bb1f6 --- /dev/null +++ b/third_party/waf/waflib/Tools/compiler_d.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Carlos Rafael Giani, 2007 (dv) +# Thomas Nagy, 2016-2018 (ita) + +""" +Try to detect a D compiler from the list of supported compilers:: + + def options(opt): + opt.load('compiler_d') + def configure(cnf): + cnf.load('compiler_d') + def build(bld): + bld.program(source='main.d', target='app') + +Only three D compilers are really present at the moment: + +* gdc +* dmd, the ldc compiler having a very similar command-line interface +* ldc2 +""" + +import re +from waflib import Utils, Logs + +d_compiler = { +'default' : ['gdc', 'dmd', 'ldc2'] +} +""" +Dict mapping the platform names to lists of names of D compilers to try, in order of preference:: + + from waflib.Tools.compiler_d import d_compiler + d_compiler['default'] = ['gdc', 'dmd', 'ldc2'] +""" + +def default_compilers(): + build_platform = Utils.unversioned_sys_platform() + possible_compiler_list = 
d_compiler.get(build_platform, d_compiler['default']) + return ' '.join(possible_compiler_list) + +def configure(conf): + """ + Detects a suitable D compiler + + :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found + """ + try: + test_for_compiler = conf.options.check_d_compiler or default_compilers() + except AttributeError: + conf.fatal("Add options(opt): opt.load('compiler_d')") + + for compiler in re.split('[ ,]+', test_for_compiler): + conf.env.stash() + conf.start_msg('Checking for %r (D compiler)' % compiler) + try: + conf.load(compiler) + except conf.errors.ConfigurationError as e: + conf.env.revert() + conf.end_msg(False) + Logs.debug('compiler_d: %r', e) + else: + if conf.env.D: + conf.end_msg(conf.env.get_flat('D')) + conf.env.COMPILER_D = compiler + conf.env.commit() + break + conf.env.revert() + conf.end_msg(False) + else: + conf.fatal('could not configure a D compiler!') + +def options(opt): + """ + This is how to provide compiler preferences on the command-line:: + + $ waf configure --check-d-compiler=dmd + """ + test_for_compiler = default_compilers() + d_compiler_opts = opt.add_option_group('Configuration options') + d_compiler_opts.add_option('--check-d-compiler', default=None, + help='list of D compilers to try [%s]' % test_for_compiler, dest='check_d_compiler') + + for x in test_for_compiler.split(): + opt.load('%s' % x) + diff --git a/third_party/waf/waflib/Tools/compiler_fc.py b/third_party/waf/waflib/Tools/compiler_fc.py new file mode 100644 index 0000000..96b58e7 --- /dev/null +++ b/third_party/waf/waflib/Tools/compiler_fc.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python +# encoding: utf-8 + +import re +from waflib import Utils, Logs +from waflib.Tools import fc + +fc_compiler = { + 'win32' : ['gfortran','ifort'], + 'darwin' : ['gfortran', 'g95', 'ifort'], + 'linux' : ['gfortran', 'g95', 'ifort'], + 'java' : ['gfortran', 'g95', 'ifort'], + 'default': ['gfortran'], + 'aix' : ['gfortran'] +} +""" +Dict mapping the platform names to lists of names of Fortran compilers to try, in order of preference:: + + from waflib.Tools.compiler_c import c_compiler + c_compiler['linux'] = ['gfortran', 'g95', 'ifort'] +""" + +def default_compilers(): + build_platform = Utils.unversioned_sys_platform() + possible_compiler_list = fc_compiler.get(build_platform, fc_compiler['default']) + return ' '.join(possible_compiler_list) + +def configure(conf): + """ + Detects a suitable Fortran compiler + + :raises: :py:class:`waflib.Errors.ConfigurationError` when no suitable compiler is found + """ + try: + test_for_compiler = conf.options.check_fortran_compiler or default_compilers() + except AttributeError: + conf.fatal("Add options(opt): opt.load('compiler_fc')") + for compiler in re.split('[ ,]+', test_for_compiler): + conf.env.stash() + conf.start_msg('Checking for %r (Fortran compiler)' % compiler) + try: + conf.load(compiler) + except conf.errors.ConfigurationError as e: + conf.env.revert() + conf.end_msg(False) + Logs.debug('compiler_fortran: %r', e) + else: + if conf.env.FC: + conf.end_msg(conf.env.get_flat('FC')) + conf.env.COMPILER_FORTRAN = compiler + conf.env.commit() + break + conf.env.revert() + conf.end_msg(False) + else: + conf.fatal('could not configure a Fortran compiler!') + +def options(opt): + """ + This is how to provide compiler preferences on the command-line:: + + $ waf configure --check-fortran-compiler=ifort + """ + test_for_compiler = default_compilers() + opt.load_special_tools('fc_*.py') + fortran_compiler_opts = 
opt.add_option_group('Configuration options') + fortran_compiler_opts.add_option('--check-fortran-compiler', default=None, + help='list of Fortran compiler to try [%s]' % test_for_compiler, + dest="check_fortran_compiler") + + for x in test_for_compiler.split(): + opt.load('%s' % x) + diff --git a/third_party/waf/waflib/Tools/cs.py b/third_party/waf/waflib/Tools/cs.py new file mode 100644 index 0000000..aecca6d --- /dev/null +++ b/third_party/waf/waflib/Tools/cs.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) + +""" +C# support. A simple example:: + + def configure(conf): + conf.load('cs') + def build(bld): + bld(features='cs', source='main.cs', gen='foo') + +Note that the configuration may compile C# snippets:: + + FRAG = ''' + namespace Moo { + public class Test { public static int Main(string[] args) { return 0; } } + }''' + def configure(conf): + conf.check(features='cs', fragment=FRAG, compile_filename='test.cs', gen='test.exe', + bintype='exe', csflags=['-pkg:gtk-sharp-2.0'], msg='Checking for Gtksharp support') +""" + +from waflib import Utils, Task, Options, Errors +from waflib.TaskGen import before_method, after_method, feature +from waflib.Tools import ccroot +from waflib.Configure import conf + +ccroot.USELIB_VARS['cs'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES']) +ccroot.lib_patterns['csshlib'] = ['%s'] + +@feature('cs') +@before_method('process_source') +def apply_cs(self): + """ + Create a C# task bound to the attribute *cs_task*. There can be only one C# task by task generator. + """ + cs_nodes = [] + no_nodes = [] + for x in self.to_nodes(self.source): + if x.name.endswith('.cs'): + cs_nodes.append(x) + else: + no_nodes.append(x) + self.source = no_nodes + + bintype = getattr(self, 'bintype', self.gen.endswith('.dll') and 'library' or 'exe') + self.cs_task = tsk = self.create_task('mcs', cs_nodes, self.path.find_or_declare(self.gen)) + tsk.env.CSTYPE = '/target:%s' % bintype + tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath() + self.env.append_value('CSFLAGS', '/platform:%s' % getattr(self, 'platform', 'anycpu')) + + inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}') + if inst_to: + # note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically + mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644) + self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod) + +@feature('cs') +@after_method('apply_cs') +def use_cs(self): + """ + C# applications honor the **use** keyword:: + + def build(bld): + bld(features='cs', source='My.cs', bintype='library', gen='my.dll', name='mylib') + bld(features='cs', source='Hi.cs', includes='.', bintype='exe', gen='hi.exe', use='mylib', name='hi') + """ + names = self.to_list(getattr(self, 'use', [])) + get = self.bld.get_tgen_by_name + for x in names: + try: + y = get(x) + except Errors.WafError: + self.env.append_value('CSFLAGS', '/reference:%s' % x) + continue + y.post() + + tsk = getattr(y, 'cs_task', None) or getattr(y, 'link_task', None) + if not tsk: + self.bld.fatal('cs task has no link task for use %r' % self) + self.cs_task.dep_nodes.extend(tsk.outputs) # dependency + self.cs_task.set_run_after(tsk) # order (redundant, the order is inferred from the nodes inputs/outputs) + self.env.append_value('CSFLAGS', '/reference:%s' % tsk.outputs[0].abspath()) + +@feature('cs') +@after_method('apply_cs', 'use_cs') +def debug_cs(self): + """ + The C# 
targets may create .mdb or .pdb files:: + + def build(bld): + bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdebug='full') + # csdebug is a value in (True, 'full', 'pdbonly') + """ + csdebug = getattr(self, 'csdebug', self.env.CSDEBUG) + if not csdebug: + return + + node = self.cs_task.outputs[0] + if self.env.CS_NAME == 'mono': + out = node.parent.find_or_declare(node.name + '.mdb') + else: + out = node.change_ext('.pdb') + self.cs_task.outputs.append(out) + + if getattr(self, 'install_task', None): + self.pdb_install_task = self.add_install_files( + install_to=self.install_task.install_to, install_from=out) + + if csdebug == 'pdbonly': + val = ['/debug+', '/debug:pdbonly'] + elif csdebug == 'full': + val = ['/debug+', '/debug:full'] + else: + val = ['/debug-'] + self.env.append_value('CSFLAGS', val) + +@feature('cs') +@after_method('debug_cs') +def doc_cs(self): + """ + The C# targets may create .xml documentation files:: + + def build(bld): + bld(features='cs', source='My.cs', bintype='library', gen='my.dll', csdoc=True) + # csdoc is a boolean value + """ + csdoc = getattr(self, 'csdoc', self.env.CSDOC) + if not csdoc: + return + + node = self.cs_task.outputs[0] + out = node.change_ext('.xml') + self.cs_task.outputs.append(out) + + if getattr(self, 'install_task', None): + self.doc_install_task = self.add_install_files( + install_to=self.install_task.install_to, install_from=out) + + self.env.append_value('CSFLAGS', '/doc:%s' % out.abspath()) + +class mcs(Task.Task): + """ + Compile C# files + """ + color = 'YELLOW' + run_str = '${MCS} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}' + + def split_argfile(self, cmd): + inline = [cmd[0]] + infile = [] + for x in cmd[1:]: + # csc doesn't want /noconfig in @file + if x.lower() == '/noconfig': + inline.append(x) + else: + infile.append(self.quote_flag(x)) + return (inline, infile) + +def configure(conf): + """ + Find a C# compiler, set the variable MCS for the compiler and CS_NAME (mono or csc) + """ + csc = getattr(Options.options, 'cscbinary', None) + if csc: + conf.env.MCS = csc + conf.find_program(['csc', 'mcs', 'gmcs'], var='MCS') + conf.env.ASS_ST = '/r:%s' + conf.env.RES_ST = '/resource:%s' + + conf.env.CS_NAME = 'csc' + if str(conf.env.MCS).lower().find('mcs') > -1: + conf.env.CS_NAME = 'mono' + +def options(opt): + """ + Add a command-line option for the configuration:: + + $ waf configure --with-csc-binary=/foo/bar/mcs + """ + opt.add_option('--with-csc-binary', type='string', dest='cscbinary') + +class fake_csshlib(Task.Task): + """ + Task used for reading a foreign .net assembly and adding the dependency on it + """ + color = 'YELLOW' + inst_to = None + + def runnable_status(self): + return Task.SKIP_ME + +@conf +def read_csshlib(self, name, paths=[]): + """ + Read a foreign .net assembly for the *use* system:: + + def build(bld): + bld.read_csshlib('ManagedLibrary.dll', paths=[bld.env.mylibrarypath]) + bld(features='cs', source='Hi.cs', bintype='exe', gen='hi.exe', use='ManagedLibrary.dll') + + :param name: Name of the library + :type name: string + :param paths: Folders in which the library may be found + :type paths: list of string + :return: A task generator having the feature *fake_lib* which will call :py:func:`waflib.Tools.ccroot.process_lib` + :rtype: :py:class:`waflib.TaskGen.task_gen` + """ + return self(name=name, features='fake_lib', lib_paths=paths, lib_type='csshlib') + diff --git a/third_party/waf/waflib/Tools/cxx.py b/third_party/waf/waflib/Tools/cxx.py new file 
mode 100644 index 0000000..194fad7 --- /dev/null +++ b/third_party/waf/waflib/Tools/cxx.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +"Base for c++ programs and libraries" + +from waflib import TaskGen, Task +from waflib.Tools import c_preproc +from waflib.Tools.ccroot import link_task, stlink_task + +@TaskGen.extension('.cpp','.cc','.cxx','.C','.c++') +def cxx_hook(self, node): + "Binds c++ file extensions to create :py:class:`waflib.Tools.cxx.cxx` instances" + return self.create_compiled_task('cxx', node) + +if not '.c' in TaskGen.task_gen.mappings: + TaskGen.task_gen.mappings['.c'] = TaskGen.task_gen.mappings['.cpp'] + +class cxx(Task.Task): + "Compiles C++ files into object files" + run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXX_SRC_F}${SRC} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' + vars = ['CXXDEPS'] # unused variable to depend on, just in case + ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] + scan = c_preproc.scan + +class cxxprogram(link_task): + "Links object files into c++ programs" + run_str = '${LINK_CXX} ${LINKFLAGS} ${CXXLNK_SRC_F}${SRC} ${CXXLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LDFLAGS}' + vars = ['LINKDEPS'] + ext_out = ['.bin'] + inst_to = '${BINDIR}' + +class cxxshlib(cxxprogram): + "Links object files into c++ shared libraries" + inst_to = '${LIBDIR}' + +class cxxstlib(stlink_task): + "Links object files into c++ static libraries" + pass # do not remove + diff --git a/third_party/waf/waflib/Tools/d.py b/third_party/waf/waflib/Tools/d.py new file mode 100644 index 0000000..e4cf73b --- /dev/null +++ b/third_party/waf/waflib/Tools/d.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Carlos Rafael Giani, 2007 (dv) +# Thomas Nagy, 2007-2018 (ita) + +from waflib import Utils, Task, Errors +from waflib.TaskGen import taskgen_method, feature, extension +from waflib.Tools import d_scan, d_config +from waflib.Tools.ccroot import link_task, stlink_task + +class d(Task.Task): + "Compile a d file into an object file" + color = 'GREEN' + run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_SRC_F:SRC} ${D_TGT_F:TGT}' + scan = d_scan.scan + +class d_with_header(d): + "Compile a d file and generate a header" + run_str = '${D} ${DFLAGS} ${DINC_ST:INCPATHS} ${D_HDR_F:tgt.outputs[1].bldpath()} ${D_SRC_F:SRC} ${D_TGT_F:tgt.outputs[0].bldpath()}' + +class d_header(Task.Task): + "Compile d headers" + color = 'BLUE' + run_str = '${D} ${D_HEADER} ${SRC}' + +class dprogram(link_task): + "Link object files into a d program" + run_str = '${D_LINKER} ${LINKFLAGS} ${DLNK_SRC_F}${SRC} ${DLNK_TGT_F:TGT} ${RPATH_ST:RPATH} ${DSTLIB_MARKER} ${DSTLIBPATH_ST:STLIBPATH} ${DSTLIB_ST:STLIB} ${DSHLIB_MARKER} ${DLIBPATH_ST:LIBPATH} ${DSHLIB_ST:LIB}' + inst_to = '${BINDIR}' + +class dshlib(dprogram): + "Link object files into a d shared library" + inst_to = '${LIBDIR}' + +class dstlib(stlink_task): + "Link object files into a d static library" + pass # do not remove + +@extension('.d', '.di', '.D') +def d_hook(self, node): + """ + Compile *D* files. 
To get .di files as well as .o files, set the following:: + + def build(bld): + bld.program(source='foo.d', target='app', generate_headers=True) + + """ + ext = Utils.destos_to_binfmt(self.env.DEST_OS) == 'pe' and 'obj' or 'o' + out = '%s.%d.%s' % (node.name, self.idx, ext) + def create_compiled_task(self, name, node): + task = self.create_task(name, node, node.parent.find_or_declare(out)) + try: + self.compiled_tasks.append(task) + except AttributeError: + self.compiled_tasks = [task] + return task + + if getattr(self, 'generate_headers', None): + tsk = create_compiled_task(self, 'd_with_header', node) + tsk.outputs.append(node.change_ext(self.env.DHEADER_ext)) + else: + tsk = create_compiled_task(self, 'd', node) + return tsk + +@taskgen_method +def generate_header(self, filename): + """ + See feature request #104:: + + def build(bld): + tg = bld.program(source='foo.d', target='app') + tg.generate_header('blah.d') + # is equivalent to: + #tg = bld.program(source='foo.d', target='app', header_lst='blah.d') + + :param filename: header to create + :type filename: string + """ + try: + self.header_lst.append([filename, self.install_path]) + except AttributeError: + self.header_lst = [[filename, self.install_path]] + +@feature('d') +def process_header(self): + """ + Process the attribute 'header_lst' to create the d header compilation tasks:: + + def build(bld): + bld.program(source='foo.d', target='app', header_lst='blah.d') + """ + for i in getattr(self, 'header_lst', []): + node = self.path.find_resource(i[0]) + if not node: + raise Errors.WafError('file %r not found on d obj' % i[0]) + self.create_task('d_header', node, node.change_ext('.di')) + diff --git a/third_party/waf/waflib/Tools/d_config.py b/third_party/waf/waflib/Tools/d_config.py new file mode 100644 index 0000000..6637556 --- /dev/null +++ b/third_party/waf/waflib/Tools/d_config.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2016-2018 (ita) + +from waflib import Utils +from waflib.Configure import conf + +@conf +def d_platform_flags(self): + """ + Sets the extensions dll/so for d programs and libraries + """ + v = self.env + if not v.DEST_OS: + v.DEST_OS = Utils.unversioned_sys_platform() + binfmt = Utils.destos_to_binfmt(self.env.DEST_OS) + if binfmt == 'pe': + v.dprogram_PATTERN = '%s.exe' + v.dshlib_PATTERN = 'lib%s.dll' + v.dstlib_PATTERN = 'lib%s.a' + elif binfmt == 'mac-o': + v.dprogram_PATTERN = '%s' + v.dshlib_PATTERN = 'lib%s.dylib' + v.dstlib_PATTERN = 'lib%s.a' + else: + v.dprogram_PATTERN = '%s' + v.dshlib_PATTERN = 'lib%s.so' + v.dstlib_PATTERN = 'lib%s.a' + +DLIB = ''' +version(D_Version2) { + import std.stdio; + int main() { + writefln("phobos2"); + return 0; + } +} else { + version(Tango) { + import tango.stdc.stdio; + int main() { + printf("tango"); + return 0; + } + } else { + import std.stdio; + int main() { + writefln("phobos1"); + return 0; + } + } +} +''' +"""Detection string for the D standard library""" + +@conf +def check_dlibrary(self, execute=True): + """ + Detects the kind of standard library that comes with the compiler, + and sets conf.env.DLIBRARY to tango, phobos1 or phobos2 + """ + ret = self.check_cc(features='d dprogram', fragment=DLIB, compile_filename='test.d', execute=execute, define_ret=True) + if execute: + self.env.DLIBRARY = ret.strip() + diff --git a/third_party/waf/waflib/Tools/d_scan.py b/third_party/waf/waflib/Tools/d_scan.py new file mode 100644 index 0000000..4e807a6 --- /dev/null +++ b/third_party/waf/waflib/Tools/d_scan.py @@ -0,0 +1,211 @@ 
+#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2016-2018 (ita) + +""" +Provide a scanner for finding dependencies on d files +""" + +import re +from waflib import Utils + +def filter_comments(filename): + """ + :param filename: d file name + :type filename: string + :rtype: list + :return: a list of characters + """ + txt = Utils.readf(filename) + i = 0 + buf = [] + max = len(txt) + begin = 0 + while i < max: + c = txt[i] + if c == '"' or c == "'": # skip a string or character literal + buf.append(txt[begin:i]) + delim = c + i += 1 + while i < max: + c = txt[i] + if c == delim: + break + elif c == '\\': # skip the character following backslash + i += 1 + i += 1 + i += 1 + begin = i + elif c == '/': # try to replace a comment with whitespace + buf.append(txt[begin:i]) + i += 1 + if i == max: + break + c = txt[i] + if c == '+': # eat nesting /+ +/ comment + i += 1 + nesting = 1 + c = None + while i < max: + prev = c + c = txt[i] + if prev == '/' and c == '+': + nesting += 1 + c = None + elif prev == '+' and c == '/': + nesting -= 1 + if nesting == 0: + break + c = None + i += 1 + elif c == '*': # eat /* */ comment + i += 1 + c = None + while i < max: + prev = c + c = txt[i] + if prev == '*' and c == '/': + break + i += 1 + elif c == '/': # eat // comment + i += 1 + while i < max and txt[i] != '\n': + i += 1 + else: # no comment + begin = i - 1 + continue + i += 1 + begin = i + buf.append(' ') + else: + i += 1 + buf.append(txt[begin:]) + return buf + +class d_parser(object): + """ + Parser for d files + """ + def __init__(self, env, incpaths): + #self.code = '' + #self.module = '' + #self.imports = [] + + self.allnames = [] + + self.re_module = re.compile(r"module\s+([^;]+)") + self.re_import = re.compile(r"import\s+([^;]+)") + self.re_import_bindings = re.compile("([^:]+):(.*)") + self.re_import_alias = re.compile("[^=]+=(.+)") + + self.env = env + + self.nodes = [] + self.names = [] + + self.incpaths = incpaths + + def tryfind(self, filename): + """ + Search file a file matching an module/import directive + + :param filename: file to read + :type filename: string + """ + found = 0 + for n in self.incpaths: + found = n.find_resource(filename.replace('.', '/') + '.d') + if found: + self.nodes.append(found) + self.waiting.append(found) + break + if not found: + if not filename in self.names: + self.names.append(filename) + + def get_strings(self, code): + """ + :param code: d code to parse + :type code: string + :return: the modules that the code uses + :rtype: a list of match objects + """ + #self.imports = [] + self.module = '' + lst = [] + + # get the module name (if present) + + mod_name = self.re_module.search(code) + if mod_name: + self.module = re.sub(r'\s+', '', mod_name.group(1)) # strip all whitespaces + + # go through the code, have a look at all import occurrences + + # first, lets look at anything beginning with "import" and ending with ";" + import_iterator = self.re_import.finditer(code) + if import_iterator: + for import_match in import_iterator: + import_match_str = re.sub(r'\s+', '', import_match.group(1)) # strip all whitespaces + + # does this end with an import bindings declaration? 
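+				# e.g. in D, "import std.stdio : writefln;" binds only 'writefln';
+				# everything after the ':' is a binding list (illustrative)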
+ # (import bindings always terminate the list of imports) + bindings_match = self.re_import_bindings.match(import_match_str) + if bindings_match: + import_match_str = bindings_match.group(1) + # if so, extract the part before the ":" (since the module declaration(s) is/are located there) + + # split the matching string into a bunch of strings, separated by a comma + matches = import_match_str.split(',') + + for match in matches: + alias_match = self.re_import_alias.match(match) + if alias_match: + # is this an alias declaration? (alias = module name) if so, extract the module name + match = alias_match.group(1) + + lst.append(match) + return lst + + def start(self, node): + """ + The parsing starts here + + :param node: input file + :type node: :py:class:`waflib.Node.Node` + """ + self.waiting = [node] + # while the stack is not empty, add the dependencies + while self.waiting: + nd = self.waiting.pop(0) + self.iter(nd) + + def iter(self, node): + """ + Find all the modules that a file depends on, uses :py:meth:`waflib.Tools.d_scan.d_parser.tryfind` to process dependent files + + :param node: input file + :type node: :py:class:`waflib.Node.Node` + """ + path = node.abspath() # obtain the absolute path + code = "".join(filter_comments(path)) # read the file and filter the comments + names = self.get_strings(code) # obtain the import strings + for x in names: + # optimization + if x in self.allnames: + continue + self.allnames.append(x) + + # for each name, see if it is like a node or not + self.tryfind(x) + +def scan(self): + "look for .d/.di used by a d file" + env = self.env + gruik = d_parser(env, self.generator.includes_nodes) + node = self.inputs[0] + gruik.start(node) + nodes = gruik.nodes + names = gruik.names + return (nodes, names) + diff --git a/third_party/waf/waflib/Tools/dbus.py b/third_party/waf/waflib/Tools/dbus.py new file mode 100644 index 0000000..d520f1c --- /dev/null +++ b/third_party/waf/waflib/Tools/dbus.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Ali Sabil, 2007 + +""" +Compiles dbus files with **dbus-binding-tool** + +Typical usage:: + + def options(opt): + opt.load('compiler_c dbus') + def configure(conf): + conf.load('compiler_c dbus') + def build(bld): + tg = bld.program( + includes = '.', + source = bld.path.ant_glob('*.c'), + target = 'gnome-hello') + tg.add_dbus_file('test.xml', 'test_prefix', 'glib-server') +""" + +from waflib import Task, Errors +from waflib.TaskGen import taskgen_method, before_method + +@taskgen_method +def add_dbus_file(self, filename, prefix, mode): + """ + Adds a dbus file to the list of dbus files to process. Store them in the attribute *dbus_lst*. + + :param filename: xml file to compile + :type filename: string + :param prefix: dbus binding tool prefix (--prefix=prefix) + :type prefix: string + :param mode: dbus binding tool mode (--mode=mode) + :type mode: string + """ + if not hasattr(self, 'dbus_lst'): + self.dbus_lst = [] + if not 'process_dbus' in self.meths: + self.meths.append('process_dbus') + self.dbus_lst.append([filename, prefix, mode]) + +@before_method('process_source') +def process_dbus(self): + """ + Processes the dbus files stored in the attribute *dbus_lst* to create :py:class:`waflib.Tools.dbus.dbus_binding_tool` instances. 
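+	Each *dbus_lst* entry is a ``[filename, prefix, mode]`` triple as stored by
+	:py:func:`waflib.Tools.dbus.add_dbus_file`, e.g. ``['test.xml', 'test_prefix', 'glib-server']``.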
+ """ + for filename, prefix, mode in getattr(self, 'dbus_lst', []): + node = self.path.find_resource(filename) + if not node: + raise Errors.WafError('file not found ' + filename) + tsk = self.create_task('dbus_binding_tool', node, node.change_ext('.h')) + tsk.env.DBUS_BINDING_TOOL_PREFIX = prefix + tsk.env.DBUS_BINDING_TOOL_MODE = mode + +class dbus_binding_tool(Task.Task): + """ + Compiles a dbus file + """ + color = 'BLUE' + ext_out = ['.h'] + run_str = '${DBUS_BINDING_TOOL} --prefix=${DBUS_BINDING_TOOL_PREFIX} --mode=${DBUS_BINDING_TOOL_MODE} --output=${TGT} ${SRC}' + shell = True # temporary workaround for #795 + +def configure(conf): + """ + Detects the program dbus-binding-tool and sets ``conf.env.DBUS_BINDING_TOOL`` + """ + conf.find_program('dbus-binding-tool', var='DBUS_BINDING_TOOL') + diff --git a/third_party/waf/waflib/Tools/dmd.py b/third_party/waf/waflib/Tools/dmd.py new file mode 100644 index 0000000..8917ca1 --- /dev/null +++ b/third_party/waf/waflib/Tools/dmd.py @@ -0,0 +1,80 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Carlos Rafael Giani, 2007 (dv) +# Thomas Nagy, 2008-2018 (ita) + +import sys +from waflib.Tools import ar, d +from waflib.Configure import conf + +@conf +def find_dmd(conf): + """ + Finds the program *dmd*, *dmd2*, or *ldc* and set the variable *D* + """ + conf.find_program(['dmd', 'dmd2', 'ldc'], var='D') + + # make sure that we're dealing with dmd1, dmd2, or ldc(1) + out = conf.cmd_and_log(conf.env.D + ['--help']) + if out.find("D Compiler v") == -1: + out = conf.cmd_and_log(conf.env.D + ['-version']) + if out.find("based on DMD v1.") == -1: + conf.fatal("detected compiler is not dmd/ldc") + +@conf +def common_flags_ldc(conf): + """ + Sets the D flags required by *ldc* + """ + v = conf.env + v.DFLAGS = ['-d-version=Posix'] + v.LINKFLAGS = [] + v.DFLAGS_dshlib = ['-relocation-model=pic'] + +@conf +def common_flags_dmd(conf): + """ + Set the flags required by *dmd* or *dmd2* + """ + v = conf.env + + v.D_SRC_F = ['-c'] + v.D_TGT_F = '-of%s' + + v.D_LINKER = v.D + v.DLNK_SRC_F = '' + v.DLNK_TGT_F = '-of%s' + v.DINC_ST = '-I%s' + + v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' + v.DSTLIB_ST = v.DSHLIB_ST = '-L-l%s' + v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L-L%s' + + v.LINKFLAGS_dprogram= ['-quiet'] + + v.DFLAGS_dshlib = ['-fPIC'] + v.LINKFLAGS_dshlib = ['-L-shared'] + + v.DHEADER_ext = '.di' + v.DFLAGS_d_with_header = ['-H', '-Hf'] + v.D_HDR_F = '%s' + +def configure(conf): + """ + Configuration for *dmd*, *dmd2*, and *ldc* + """ + conf.find_dmd() + + if sys.platform == 'win32': + out = conf.cmd_and_log(conf.env.D + ['--help']) + if out.find('D Compiler v2.') > -1: + conf.fatal('dmd2 on Windows is not supported, use gdc or ldc2 instead') + + conf.load('ar') + conf.load('d') + conf.common_flags_dmd() + conf.d_platform_flags() + + if str(conf.env.D).find('ldc') > -1: + conf.common_flags_ldc() + diff --git a/third_party/waf/waflib/Tools/errcheck.py b/third_party/waf/waflib/Tools/errcheck.py new file mode 100644 index 0000000..de8d75a --- /dev/null +++ b/third_party/waf/waflib/Tools/errcheck.py @@ -0,0 +1,237 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011 (ita) + +""" +Common mistakes highlighting. 
+ +There is a performance impact, so this tool is only loaded when running ``waf -v`` +""" + +typos = { +'feature':'features', +'sources':'source', +'targets':'target', +'include':'includes', +'export_include':'export_includes', +'define':'defines', +'importpath':'includes', +'installpath':'install_path', +'iscopy':'is_copy', +'uses':'use', +} + +meths_typos = ['__call__', 'program', 'shlib', 'stlib', 'objects'] + +import sys +from waflib import Logs, Build, Node, Task, TaskGen, ConfigSet, Errors, Utils +from waflib.Tools import ccroot + +def check_same_targets(self): + mp = Utils.defaultdict(list) + uids = {} + + def check_task(tsk): + if not isinstance(tsk, Task.Task): + return + if hasattr(tsk, 'no_errcheck_out'): + return + + for node in tsk.outputs: + mp[node].append(tsk) + try: + uids[tsk.uid()].append(tsk) + except KeyError: + uids[tsk.uid()] = [tsk] + + for g in self.groups: + for tg in g: + try: + for tsk in tg.tasks: + check_task(tsk) + except AttributeError: + # raised if not a task generator, which should be uncommon + check_task(tg) + + dupe = False + for (k, v) in mp.items(): + if len(v) > 1: + dupe = True + msg = '* Node %r is created more than once%s. The task generators are:' % (k, Logs.verbose == 1 and " (full message on 'waf -v -v')" or "") + Logs.error(msg) + for x in v: + if Logs.verbose > 1: + Logs.error(' %d. %r', 1 + v.index(x), x.generator) + else: + Logs.error(' %d. %r in %r', 1 + v.index(x), x.generator.name, getattr(x.generator, 'path', None)) + Logs.error('If you think that this is an error, set no_errcheck_out on the task instance') + + if not dupe: + for (k, v) in uids.items(): + if len(v) > 1: + Logs.error('* Several tasks use the same identifier. Please check the information on\n https://waf.io/apidocs/Task.html?highlight=uid#waflib.Task.Task.uid') + tg_details = tsk.generator.name + if Logs.verbose > 2: + tg_details = tsk.generator + for tsk in v: + Logs.error(' - object %r (%r) defined in %r', tsk.__class__.__name__, tsk, tg_details) + +def check_invalid_constraints(self): + feat = set() + for x in list(TaskGen.feats.values()): + feat.union(set(x)) + for (x, y) in TaskGen.task_gen.prec.items(): + feat.add(x) + feat.union(set(y)) + ext = set() + for x in TaskGen.task_gen.mappings.values(): + ext.add(x.__name__) + invalid = ext & feat + if invalid: + Logs.error('The methods %r have invalid annotations: @extension <-> @feature/@before_method/@after_method', list(invalid)) + + # the build scripts have been read, so we can check for invalid after/before attributes on task classes + for cls in list(Task.classes.values()): + if sys.hexversion > 0x3000000 and issubclass(cls, Task.Task) and isinstance(cls.hcode, str): + raise Errors.WafError('Class %r has hcode value %r of type <str>, expecting <bytes> (use Utils.h_cmd() ?)' % (cls, cls.hcode)) + + for x in ('before', 'after'): + for y in Utils.to_list(getattr(cls, x, [])): + if not Task.classes.get(y): + Logs.error('Erroneous order constraint %r=%r on task class %r', x, y, cls.__name__) + if getattr(cls, 'rule', None): + Logs.error('Erroneous attribute "rule" on task class %r (rename to "run_str")', cls.__name__) + +def replace(m): + """ + Replaces existing BuildContext methods to verify parameter names, + for example ``bld(source=)`` has no ending *s* + """ + oldcall = getattr(Build.BuildContext, m) + def call(self, *k, **kw): + ret = oldcall(self, *k, **kw) + for x in typos: + if x in kw: + if x == 'iscopy' and 'subst' in getattr(self, 'features', ''): + continue + Logs.error('Fix the typo %r -> %r on %r', x, 
typos[x], ret) + return ret + setattr(Build.BuildContext, m, call) + +def enhance_lib(): + """ + Modifies existing classes and methods to enable error verification + """ + for m in meths_typos: + replace(m) + + # catch '..' in ant_glob patterns + def ant_glob(self, *k, **kw): + if k: + lst = Utils.to_list(k[0]) + for pat in lst: + sp = pat.split('/') + if '..' in sp: + Logs.error("In ant_glob pattern %r: '..' means 'two dots', not 'parent directory'", k[0]) + if '.' in sp: + Logs.error("In ant_glob pattern %r: '.' means 'one dot', not 'current directory'", k[0]) + return self.old_ant_glob(*k, **kw) + Node.Node.old_ant_glob = Node.Node.ant_glob + Node.Node.ant_glob = ant_glob + + # catch ant_glob on build folders + def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True, quiet=False): + if remove: + try: + if self.is_child_of(self.ctx.bldnode) and not quiet: + quiet = True + Logs.error('Calling ant_glob on build folders (%r) is dangerous: add quiet=True / remove=False', self) + except AttributeError: + pass + return self.old_ant_iter(accept, maxdepth, pats, dir, src, remove, quiet) + Node.Node.old_ant_iter = Node.Node.ant_iter + Node.Node.ant_iter = ant_iter + + # catch conflicting ext_in/ext_out/before/after declarations + old = Task.is_before + def is_before(t1, t2): + ret = old(t1, t2) + if ret and old(t2, t1): + Logs.error('Contradictory order constraints in classes %r %r', t1, t2) + return ret + Task.is_before = is_before + + # check for bld(feature='cshlib') where no 'c' is given - this can be either a mistake or on purpose + # so we only issue a warning + def check_err_features(self): + lst = self.to_list(self.features) + if 'shlib' in lst: + Logs.error('feature shlib -> cshlib, dshlib or cxxshlib') + for x in ('c', 'cxx', 'd', 'fc'): + if not x in lst and lst and lst[0] in [x+y for y in ('program', 'shlib', 'stlib')]: + Logs.error('%r features is probably missing %r', self, x) + TaskGen.feature('*')(check_err_features) + + # check for erroneous order constraints + def check_err_order(self): + if not hasattr(self, 'rule') and not 'subst' in Utils.to_list(self.features): + for x in ('before', 'after', 'ext_in', 'ext_out'): + if hasattr(self, x): + Logs.warn('Erroneous order constraint %r on non-rule based task generator %r', x, self) + else: + for x in ('before', 'after'): + for y in self.to_list(getattr(self, x, [])): + if not Task.classes.get(y): + Logs.error('Erroneous order constraint %s=%r on %r (no such class)', x, y, self) + TaskGen.feature('*')(check_err_order) + + # check for @extension used with @feature/@before_method/@after_method + def check_compile(self): + check_invalid_constraints(self) + try: + ret = self.orig_compile() + finally: + check_same_targets(self) + return ret + Build.BuildContext.orig_compile = Build.BuildContext.compile + Build.BuildContext.compile = check_compile + + # check for invalid build groups #914 + def use_rec(self, name, **kw): + try: + y = self.bld.get_tgen_by_name(name) + except Errors.WafError: + pass + else: + idx = self.bld.get_group_idx(self) + odx = self.bld.get_group_idx(y) + if odx > idx: + msg = "Invalid 'use' across build groups:" + if Logs.verbose > 1: + msg += '\n target %r\n uses:\n %r' % (self, y) + else: + msg += " %r uses %r (try 'waf -v -v' for the full error)" % (self.name, name) + raise Errors.WafError(msg) + self.orig_use_rec(name, **kw) + TaskGen.task_gen.orig_use_rec = TaskGen.task_gen.use_rec + TaskGen.task_gen.use_rec = use_rec + + # check for env.append + def _getattr(self, name, 
default=None):
+		if name == 'append' or name == 'add':
+			raise Errors.WafError('env.append and env.add do not exist: use env.append_value/env.append_unique')
+		elif name == 'prepend':
+			raise Errors.WafError('env.prepend does not exist: use env.prepend_value')
+		if name in self.__slots__:
+			return super(ConfigSet.ConfigSet, self).__getattr__(name, default)
+		else:
+			return self[name]
+	ConfigSet.ConfigSet.__getattr__ = _getattr
+
+
+def options(opt):
+	"""
+	Error verification can be enabled by default (not just on ``waf -v``) by calling this function from the user script options
+	"""
+	enhance_lib()
+
diff --git a/third_party/waf/waflib/Tools/fc.py b/third_party/waf/waflib/Tools/fc.py
new file mode 100644
index 0000000..7fbd76d
--- /dev/null
+++ b/third_party/waf/waflib/Tools/fc.py
@@ -0,0 +1,203 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# DC 2008
+# Thomas Nagy 2016-2018 (ita)
+
+"""
+Fortran support
+"""
+
+from waflib import Utils, Task, Errors
+from waflib.Tools import ccroot, fc_config, fc_scan
+from waflib.TaskGen import extension
+from waflib.Configure import conf
+
+ccroot.USELIB_VARS['fc'] = set(['FCFLAGS', 'DEFINES', 'INCLUDES', 'FCPPFLAGS'])
+ccroot.USELIB_VARS['fcprogram_test'] = ccroot.USELIB_VARS['fcprogram'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'LDFLAGS'])
+ccroot.USELIB_VARS['fcshlib'] = set(['LIB', 'STLIB', 'LIBPATH', 'STLIBPATH', 'LINKFLAGS', 'RPATH', 'LINKDEPS', 'LDFLAGS'])
+ccroot.USELIB_VARS['fcstlib'] = set(['ARFLAGS', 'LINKDEPS'])
+
+@extension('.f','.F','.f90','.F90','.for','.FOR','.f95','.F95','.f03','.F03','.f08','.F08')
+def fc_hook(self, node):
+	"Binds the Fortran file extensions to create :py:class:`waflib.Tools.fc.fc` instances"
+	return self.create_compiled_task('fc', node)
+
+@conf
+def modfile(conf, name):
+	"""
+	Turns a module name into the right module file name.
+	Defaults to all lower case.
+	"""
+	if name.find(':') >= 0:
+		# Depending on a submodule!
+		separator = conf.env.FC_SUBMOD_SEPARATOR or '@'
+		# Ancestors of the submodule will be prefixed to the
+		# submodule name, separated by a colon.
+		modpath = name.split(':')
+		# Only the ancestor (actual) module and the submodule name
+		# will be used for the filename.
+		modname = modpath[0] + separator + modpath[-1]
+		suffix = conf.env.FC_SUBMOD_SUFFIX or '.smod'
+	else:
+		modname = name
+		suffix = '.mod'
+
+	return {'lower'     :modname.lower() + suffix.lower(),
+		'lower.MOD' :modname.lower() + suffix.upper(),
+		'UPPER.mod' :modname.upper() + suffix.lower(),
+		'UPPER'     :modname.upper() + suffix.upper()}[conf.env.FC_MOD_CAPITALIZATION or 'lower']
+
+def get_fortran_tasks(tsk):
+	"""
+	Obtains all fortran tasks from the same build group.
Those tasks must not have + the attribute 'nomod' or 'mod_fortran_done' + + :return: a list of :py:class:`waflib.Tools.fc.fc` instances + """ + bld = tsk.generator.bld + tasks = bld.get_tasks_group(bld.get_group_idx(tsk.generator)) + return [x for x in tasks if isinstance(x, fc) and not getattr(x, 'nomod', None) and not getattr(x, 'mod_fortran_done', None)] + +class fc(Task.Task): + """ + Fortran tasks can only run when all fortran tasks in a current task group are ready to be executed + This may cause a deadlock if some fortran task is waiting for something that cannot happen (circular dependency) + Should this ever happen, set the 'nomod=True' on those tasks instances to break the loop + """ + color = 'GREEN' + run_str = '${FC} ${FCFLAGS} ${FCINCPATH_ST:INCPATHS} ${FCDEFINES_ST:DEFINES} ${_FCMODOUTFLAGS} ${FC_TGT_F}${TGT[0].abspath()} ${FC_SRC_F}${SRC[0].abspath()} ${FCPPFLAGS}' + vars = ["FORTRANMODPATHFLAG"] + + def scan(self): + """Fortran dependency scanner""" + tmp = fc_scan.fortran_parser(self.generator.includes_nodes) + tmp.task = self + tmp.start(self.inputs[0]) + return (tmp.nodes, tmp.names) + + def runnable_status(self): + """ + Sets the mod file outputs and the dependencies on the mod files over all Fortran tasks + executed by the main thread so there are no concurrency issues + """ + if getattr(self, 'mod_fortran_done', None): + return super(fc, self).runnable_status() + + # now, if we reach this part it is because this fortran task is the first in the list + bld = self.generator.bld + + # obtain the fortran tasks + lst = get_fortran_tasks(self) + + # disable this method for other tasks + for tsk in lst: + tsk.mod_fortran_done = True + + # wait for all the .f tasks to be ready for execution + # and ensure that the scanners are called at least once + for tsk in lst: + ret = tsk.runnable_status() + if ret == Task.ASK_LATER: + # we have to wait for one of the other fortran tasks to be ready + # this may deadlock if there are dependencies between fortran tasks + # but this should not happen (we are setting them here!) 
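+				# resetting the flag makes the whole group recompute its status on the next poll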
+				for x in lst:
+					x.mod_fortran_done = None
+
+				return Task.ASK_LATER
+
+		ins = Utils.defaultdict(set)
+		outs = Utils.defaultdict(set)
+
+		# the .mod files to create
+		for tsk in lst:
+			key = tsk.uid()
+			for x in bld.raw_deps[key]:
+				if x.startswith('MOD@'):
+					name = bld.modfile(x.replace('MOD@', ''))
+					node = bld.srcnode.find_or_declare(name)
+					tsk.set_outputs(node)
+					outs[node].add(tsk)
+
+		# the .mod files to use
+		for tsk in lst:
+			key = tsk.uid()
+			for x in bld.raw_deps[key]:
+				if x.startswith('USE@'):
+					name = bld.modfile(x.replace('USE@', ''))
+					node = bld.srcnode.find_resource(name)
+					if node and node not in tsk.outputs:
+						if not node in bld.node_deps[key]:
+							bld.node_deps[key].append(node)
+						ins[node].add(tsk)
+
+		# where a module is both produced and consumed, enforce the task order
+		for k in ins.keys():
+			for a in ins[k]:
+				a.run_after.update(outs[k])
+				for x in outs[k]:
+					self.generator.bld.producer.revdeps[x].add(a)
+
+				# the scanner cannot output nodes, so we have to set them
+				# ourselves as task.dep_nodes (additional input nodes)
+				tmp = []
+				for t in outs[k]:
+					tmp.extend(t.outputs)
+				a.dep_nodes.extend(tmp)
+				a.dep_nodes.sort(key=lambda x: x.abspath())
+
+		# the task objects have changed: clear the signature cache
+		for tsk in lst:
+			try:
+				delattr(tsk, 'cache_sig')
+			except AttributeError:
+				pass
+
+		return super(fc, self).runnable_status()
+
+class fcprogram(ccroot.link_task):
+	"""Links Fortran programs"""
+	color = 'YELLOW'
+	run_str = '${FC} ${LINKFLAGS} ${FCLNK_SRC_F}${SRC} ${FCLNK_TGT_F}${TGT[0].abspath()} ${RPATH_ST:RPATH} ${FCSTLIB_MARKER} ${FCSTLIBPATH_ST:STLIBPATH} ${FCSTLIB_ST:STLIB} ${FCSHLIB_MARKER} ${FCLIBPATH_ST:LIBPATH} ${FCLIB_ST:LIB} ${LDFLAGS}'
+	inst_to = '${BINDIR}'
+
+class fcshlib(fcprogram):
+	"""Links Fortran shared libraries"""
+	inst_to = '${LIBDIR}'
+
+class fcstlib(ccroot.stlink_task):
+	"""Links Fortran static libraries (uses ar by default)"""
+	pass # do not remove the pass statement
+
+class fcprogram_test(fcprogram):
+	"""Custom link task to obtain compiler outputs for Fortran configuration tests"""
+
+	def runnable_status(self):
+		"""This task is always executed"""
+		ret = super(fcprogram_test, self).runnable_status()
+		if ret == Task.SKIP_ME:
+			ret = Task.RUN_ME
+		return ret
+
+	def exec_command(self, cmd, **kw):
+		"""Stores the compiler stdout/stderr onto the build context, to bld.out + bld.err"""
+		bld = self.generator.bld
+
+		kw['shell'] = isinstance(cmd, str)
+		kw['stdout'] = kw['stderr'] = Utils.subprocess.PIPE
+		kw['cwd'] = self.get_cwd()
+		bld.out = bld.err = ''
+
+		bld.to_log('command: %s\n' % cmd)
+
+		kw['output'] = 0
+		try:
+			(bld.out, bld.err) = bld.cmd_and_log(cmd, **kw)
+		except Errors.WafError:
+			return -1
+
+		if bld.out:
+			bld.to_log('out: %s\n' % bld.out)
+		if bld.err:
+			bld.to_log('err: %s\n' % bld.err)
+
diff --git a/third_party/waf/waflib/Tools/fc_config.py b/third_party/waf/waflib/Tools/fc_config.py
new file mode 100644
index 0000000..dc5e5c9
--- /dev/null
+++ b/third_party/waf/waflib/Tools/fc_config.py
@@ -0,0 +1,488 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# DC 2008
+# Thomas Nagy 2016-2018 (ita)
+
+"""
+Fortran configuration helpers
+"""
+
+import re, os, sys, shlex
+from waflib.Configure import conf
+from waflib.TaskGen import feature, before_method
+
+FC_FRAGMENT = ' program main\n end program main\n'
+FC_FRAGMENT2 = ' PROGRAM MAIN\n END\n' # what's the actual difference between these?
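+# (presumably: FC_FRAGMENT spells the modern 'end program main' form, while
+# FC_FRAGMENT2 is the upper-case variant with a bare END that even very old
+# compilers accept; both are used below as minimal compile checks)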
+ +@conf +def fc_flags(conf): + """ + Defines common fortran configuration flags and file extensions + """ + v = conf.env + + v.FC_SRC_F = [] + v.FC_TGT_F = ['-c', '-o'] + v.FCINCPATH_ST = '-I%s' + v.FCDEFINES_ST = '-D%s' + + if not v.LINK_FC: + v.LINK_FC = v.FC + + v.FCLNK_SRC_F = [] + v.FCLNK_TGT_F = ['-o'] + + v.FCFLAGS_fcshlib = ['-fpic'] + v.LINKFLAGS_fcshlib = ['-shared'] + v.fcshlib_PATTERN = 'lib%s.so' + + v.fcstlib_PATTERN = 'lib%s.a' + + v.FCLIB_ST = '-l%s' + v.FCLIBPATH_ST = '-L%s' + v.FCSTLIB_ST = '-l%s' + v.FCSTLIBPATH_ST = '-L%s' + v.FCSTLIB_MARKER = '-Wl,-Bstatic' + v.FCSHLIB_MARKER = '-Wl,-Bdynamic' + + v.SONAME_ST = '-Wl,-h,%s' + +@conf +def fc_add_flags(conf): + """ + Adds FCFLAGS / LDFLAGS / LINKFLAGS from os.environ to conf.env + """ + conf.add_os_flags('FCPPFLAGS', dup=False) + conf.add_os_flags('FCFLAGS', dup=False) + conf.add_os_flags('LINKFLAGS', dup=False) + conf.add_os_flags('LDFLAGS', dup=False) + +@conf +def check_fortran(self, *k, **kw): + """ + Compiles a Fortran program to ensure that the settings are correct + """ + self.check_cc( + fragment = FC_FRAGMENT, + compile_filename = 'test.f', + features = 'fc fcprogram', + msg = 'Compiling a simple fortran app') + +@conf +def check_fc(self, *k, **kw): + """ + Same as :py:func:`waflib.Tools.c_config.check` but defaults to the *Fortran* programming language + (this overrides the C defaults in :py:func:`waflib.Tools.c_config.validate_c`) + """ + kw['compiler'] = 'fc' + if not 'compile_mode' in kw: + kw['compile_mode'] = 'fc' + if not 'type' in kw: + kw['type'] = 'fcprogram' + if not 'compile_filename' in kw: + kw['compile_filename'] = 'test.f90' + if not 'code' in kw: + kw['code'] = FC_FRAGMENT + return self.check(*k, **kw) + +# ------------------------------------------------------------------------ +# --- These are the default platform modifiers, refactored here for +# convenience. gfortran and g95 have much overlap. +# ------------------------------------------------------------------------ + +@conf +def fortran_modifier_darwin(conf): + """ + Defines Fortran flags and extensions for OSX systems + """ + v = conf.env + v.FCFLAGS_fcshlib = ['-fPIC'] + v.LINKFLAGS_fcshlib = ['-dynamiclib'] + v.fcshlib_PATTERN = 'lib%s.dylib' + v.FRAMEWORKPATH_ST = '-F%s' + v.FRAMEWORK_ST = ['-framework'] + + v.LINKFLAGS_fcstlib = [] + + v.FCSHLIB_MARKER = '' + v.FCSTLIB_MARKER = '' + v.SONAME_ST = '' + +@conf +def fortran_modifier_win32(conf): + """ + Defines Fortran flags for Windows platforms + """ + v = conf.env + v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' + + v.fcshlib_PATTERN = '%s.dll' + v.implib_PATTERN = '%s.dll.a' + v.IMPLIB_ST = '-Wl,--out-implib,%s' + + v.FCFLAGS_fcshlib = [] + + # Auto-import is enabled by default even without this option, + # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages + # that the linker emits otherwise. 
+ v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) + +@conf +def fortran_modifier_cygwin(conf): + """ + Defines Fortran flags for use on cygwin + """ + fortran_modifier_win32(conf) + v = conf.env + v.fcshlib_PATTERN = 'cyg%s.dll' + v.append_value('LINKFLAGS_fcshlib', ['-Wl,--enable-auto-image-base']) + v.FCFLAGS_fcshlib = [] + +# ------------------------------------------------------------------------ + +@conf +def check_fortran_dummy_main(self, *k, **kw): + """ + Determines if a main function is needed by compiling a code snippet with + the C compiler and linking it with the Fortran compiler (useful on unix-like systems) + """ + if not self.env.CC: + self.fatal('A c compiler is required for check_fortran_dummy_main') + + lst = ['MAIN__', '__MAIN', '_MAIN', 'MAIN_', 'MAIN'] + lst.extend([m.lower() for m in lst]) + lst.append('') + + self.start_msg('Detecting whether we need a dummy main') + for main in lst: + kw['fortran_main'] = main + try: + self.check_cc( + fragment = 'int %s() { return 0; }\n' % (main or 'test'), + features = 'c fcprogram', + mandatory = True + ) + if not main: + self.env.FC_MAIN = -1 + self.end_msg('no') + else: + self.env.FC_MAIN = main + self.end_msg('yes %s' % main) + break + except self.errors.ConfigurationError: + pass + else: + self.end_msg('not found') + self.fatal('could not detect whether fortran requires a dummy main, see the config.log') + +# ------------------------------------------------------------------------ + +GCC_DRIVER_LINE = re.compile('^Driving:') +POSIX_STATIC_EXT = re.compile(r'\S+\.a') +POSIX_LIB_FLAGS = re.compile(r'-l\S+') + +@conf +def is_link_verbose(self, txt): + """Returns True if 'useful' link options can be found in txt""" + assert isinstance(txt, str) + for line in txt.splitlines(): + if not GCC_DRIVER_LINE.search(line): + if POSIX_STATIC_EXT.search(line) or POSIX_LIB_FLAGS.search(line): + return True + return False + +@conf +def check_fortran_verbose_flag(self, *k, **kw): + """ + Checks what kind of verbose (-v) flag works, then sets it to env.FC_VERBOSE_FLAG + """ + self.start_msg('fortran link verbose flag') + for x in ('-v', '--verbose', '-verbose', '-V'): + try: + self.check_cc( + features = 'fc fcprogram_test', + fragment = FC_FRAGMENT2, + compile_filename = 'test.f', + linkflags = [x], + mandatory=True) + except self.errors.ConfigurationError: + pass + else: + # output is on stderr or stdout (for xlf) + if self.is_link_verbose(self.test_bld.err) or self.is_link_verbose(self.test_bld.out): + self.end_msg(x) + break + else: + self.end_msg('failure') + self.fatal('Could not obtain the fortran link verbose flag (see config.log)') + + self.env.FC_VERBOSE_FLAG = x + return x + +# ------------------------------------------------------------------------ + +# linkflags which match those are ignored +LINKFLAGS_IGNORED = [r'-lang*', r'-lcrt[a-zA-Z0-9\.]*\.o', r'-lc$', r'-lSystem', r'-libmil', r'-LIST:*', r'-LNO:*'] +if os.name == 'nt': + LINKFLAGS_IGNORED.extend([r'-lfrt*', r'-luser32', r'-lkernel32', r'-ladvapi32', r'-lmsvcrt', r'-lshell32', r'-lmingw', r'-lmoldname']) +else: + LINKFLAGS_IGNORED.append(r'-lgcc*') +RLINKFLAGS_IGNORED = [re.compile(f) for f in LINKFLAGS_IGNORED] + +def _match_ignore(line): + """Returns True if the line should be ignored (Fortran verbose flag test)""" + for i in RLINKFLAGS_IGNORED: + if i.match(line): + return True + return False + +def parse_fortran_link(lines): + """Given the output of verbose link of Fortran compiler, this returns a + list of flags necessary for linking using the standard 
linker.""" + final_flags = [] + for line in lines: + if not GCC_DRIVER_LINE.match(line): + _parse_flink_line(line, final_flags) + return final_flags + +SPACE_OPTS = re.compile('^-[LRuYz]$') +NOSPACE_OPTS = re.compile('^-[RL]') + +def _parse_flink_token(lexer, token, tmp_flags): + # Here we go (convention for wildcard is shell, not regex !) + # 1 TODO: we first get some root .a libraries + # 2 TODO: take everything starting by -bI:* + # 3 Ignore the following flags: -lang* | -lcrt*.o | -lc | + # -lgcc* | -lSystem | -libmil | -LANG:=* | -LIST:* | -LNO:*) + # 4 take into account -lkernel32 + # 5 For options of the kind -[[LRuYz]], as they take one argument + # after, the actual option is the next token + # 6 For -YP,*: take and replace by -Larg where arg is the old + # argument + # 7 For -[lLR]*: take + + # step 3 + if _match_ignore(token): + pass + # step 4 + elif token.startswith('-lkernel32') and sys.platform == 'cygwin': + tmp_flags.append(token) + # step 5 + elif SPACE_OPTS.match(token): + t = lexer.get_token() + if t.startswith('P,'): + t = t[2:] + for opt in t.split(os.pathsep): + tmp_flags.append('-L%s' % opt) + # step 6 + elif NOSPACE_OPTS.match(token): + tmp_flags.append(token) + # step 7 + elif POSIX_LIB_FLAGS.match(token): + tmp_flags.append(token) + else: + # ignore anything not explicitly taken into account + pass + + t = lexer.get_token() + return t + +def _parse_flink_line(line, final_flags): + """private""" + lexer = shlex.shlex(line, posix = True) + lexer.whitespace_split = True + + t = lexer.get_token() + tmp_flags = [] + while t: + t = _parse_flink_token(lexer, t, tmp_flags) + + final_flags.extend(tmp_flags) + return final_flags + +@conf +def check_fortran_clib(self, autoadd=True, *k, **kw): + """ + Obtains the flags for linking with the C library + if this check works, add uselib='CLIB' to your task generators + """ + if not self.env.FC_VERBOSE_FLAG: + self.fatal('env.FC_VERBOSE_FLAG is not set: execute check_fortran_verbose_flag?') + + self.start_msg('Getting fortran runtime link flags') + try: + self.check_cc( + fragment = FC_FRAGMENT2, + compile_filename = 'test.f', + features = 'fc fcprogram_test', + linkflags = [self.env.FC_VERBOSE_FLAG] + ) + except Exception: + self.end_msg(False) + if kw.get('mandatory', True): + conf.fatal('Could not find the c library flags') + else: + out = self.test_bld.err + flags = parse_fortran_link(out.splitlines()) + self.end_msg('ok (%s)' % ' '.join(flags)) + self.env.LINKFLAGS_CLIB = flags + return flags + return [] + +def getoutput(conf, cmd, stdin=False): + """ + Obtains Fortran command outputs + """ + from waflib import Errors + if conf.env.env: + env = conf.env.env + else: + env = dict(os.environ) + env['LANG'] = 'C' + input = stdin and '\n'.encode() or None + try: + out, err = conf.cmd_and_log(cmd, env=env, output=0, input=input) + except Errors.WafError as e: + # An WafError might indicate an error code during the command + # execution, in this case we still obtain the stderr and stdout, + # which we can use to find the version string. + if not (hasattr(e, 'stderr') and hasattr(e, 'stdout')): + raise e + else: + # Ignore the return code and return the original + # stdout and stderr. 
+ out = e.stdout + err = e.stderr + except Exception: + conf.fatal('could not determine the compiler version %r' % cmd) + return (out, err) + +# ------------------------------------------------------------------------ + +ROUTINES_CODE = """\ + subroutine foobar() + return + end + subroutine foo_bar() + return + end +""" + +MAIN_CODE = """ +void %(dummy_func_nounder)s(void); +void %(dummy_func_under)s(void); +int %(main_func_name)s() { + %(dummy_func_nounder)s(); + %(dummy_func_under)s(); + return 0; +} +""" + +@feature('link_main_routines_func') +@before_method('process_source') +def link_main_routines_tg_method(self): + """ + The configuration test declares a unique task generator, + so we create other task generators from there for fortran link tests + """ + def write_test_file(task): + task.outputs[0].write(task.generator.code) + bld = self.bld + bld(rule=write_test_file, target='main.c', code=MAIN_CODE % self.__dict__) + bld(rule=write_test_file, target='test.f', code=ROUTINES_CODE) + bld(features='fc fcstlib', source='test.f', target='test') + bld(features='c fcprogram', source='main.c', target='app', use='test') + +def mangling_schemes(): + """ + Generate triplets for use with mangle_name + (used in check_fortran_mangling) + the order is tuned for gfortan + """ + for u in ('_', ''): + for du in ('', '_'): + for c in ("lower", "upper"): + yield (u, du, c) + +def mangle_name(u, du, c, name): + """Mangle a name from a triplet (used in check_fortran_mangling)""" + return getattr(name, c)() + u + (name.find('_') != -1 and du or '') + +@conf +def check_fortran_mangling(self, *k, **kw): + """ + Detect the mangling scheme, sets FORTRAN_MANGLING to the triplet found + + This test will compile a fortran static library, then link a c app against it + """ + if not self.env.CC: + self.fatal('A c compiler is required for link_main_routines') + if not self.env.FC: + self.fatal('A fortran compiler is required for link_main_routines') + if not self.env.FC_MAIN: + self.fatal('Checking for mangling requires self.env.FC_MAIN (execute "check_fortran_dummy_main" first?)') + + self.start_msg('Getting fortran mangling scheme') + for (u, du, c) in mangling_schemes(): + try: + self.check_cc( + compile_filename = [], + features = 'link_main_routines_func', + msg = 'nomsg', + errmsg = 'nomsg', + dummy_func_nounder = mangle_name(u, du, c, 'foobar'), + dummy_func_under = mangle_name(u, du, c, 'foo_bar'), + main_func_name = self.env.FC_MAIN + ) + except self.errors.ConfigurationError: + pass + else: + self.end_msg("ok ('%s', '%s', '%s-case')" % (u, du, c)) + self.env.FORTRAN_MANGLING = (u, du, c) + break + else: + self.end_msg(False) + self.fatal('mangler not found') + return (u, du, c) + +@feature('pyext') +@before_method('propagate_uselib_vars', 'apply_link') +def set_lib_pat(self): + """Sets the Fortran flags for linking with Python""" + self.env.fcshlib_PATTERN = self.env.pyext_PATTERN + +@conf +def detect_openmp(self): + """ + Detects openmp flags and sets the OPENMP ``FCFLAGS``/``LINKFLAGS`` + """ + for x in ('-fopenmp','-openmp','-mp','-xopenmp','-omp','-qsmp=omp'): + try: + self.check_fc( + msg = 'Checking for OpenMP flag %s' % x, + fragment = 'program main\n call omp_get_num_threads()\nend program main', + fcflags = x, + linkflags = x, + uselib_store = 'OPENMP' + ) + except self.errors.ConfigurationError: + pass + else: + break + else: + self.fatal('Could not find OpenMP') + +@conf +def check_gfortran_o_space(self): + if self.env.FC_NAME != 'GFORTRAN' or int(self.env.FC_VERSION[0]) > 4: + # This is for 
old compilers and only for gfortran. + # No idea how other implementations handle this. Be safe and bail out. + return + self.env.stash() + self.env.FCLNK_TGT_F = ['-o', ''] + try: + self.check_fc(msg='Checking if the -o link must be split from arguments', fragment=FC_FRAGMENT, features='fc fcshlib') + except self.errors.ConfigurationError: + self.env.revert() + else: + self.env.commit() diff --git a/third_party/waf/waflib/Tools/fc_scan.py b/third_party/waf/waflib/Tools/fc_scan.py new file mode 100644 index 0000000..0824c92 --- /dev/null +++ b/third_party/waf/waflib/Tools/fc_scan.py @@ -0,0 +1,120 @@ +#! /usr/bin/env python +# encoding: utf-8 +# DC 2008 +# Thomas Nagy 2016-2018 (ita) + +import re + +INC_REGEX = r"""(?:^|['">]\s*;)\s*(?:|#\s*)INCLUDE\s+(?:\w+_)?[<"'](.+?)(?=["'>])""" +USE_REGEX = r"""(?:^|;)\s*USE(?:\s+|(?:(?:\s*,\s*(?:NON_)?INTRINSIC)?\s*::))\s*(\w+)""" +MOD_REGEX = r"""(?:^|;)\s*MODULE(?!\s+(?:PROCEDURE|SUBROUTINE|FUNCTION))\s+(\w+)""" +SMD_REGEX = r"""(?:^|;)\s*SUBMODULE\s*\(([\w:]+)\)\s*(\w+)""" + +re_inc = re.compile(INC_REGEX, re.I) +re_use = re.compile(USE_REGEX, re.I) +re_mod = re.compile(MOD_REGEX, re.I) +re_smd = re.compile(SMD_REGEX, re.I) + +class fortran_parser(object): + """ + This parser returns: + + * the nodes corresponding to the module names to produce + * the nodes corresponding to the include files used + * the module names used by the fortran files + """ + def __init__(self, incpaths): + self.seen = [] + """Files already parsed""" + + self.nodes = [] + """List of :py:class:`waflib.Node.Node` representing the dependencies to return""" + + self.names = [] + """List of module names to return""" + + self.incpaths = incpaths + """List of :py:class:`waflib.Node.Node` representing the include paths""" + + def find_deps(self, node): + """ + Parses a Fortran file to obtain the dependencies used/provided + + :param node: fortran file to read + :type node: :py:class:`waflib.Node.Node` + :return: lists representing the includes, the modules used, and the modules created by a fortran file + :rtype: tuple of list of strings + """ + txt = node.read() + incs = [] + uses = [] + mods = [] + for line in txt.splitlines(): + # line by line regexp search? optimize? + m = re_inc.search(line) + if m: + incs.append(m.group(1)) + m = re_use.search(line) + if m: + uses.append(m.group(1)) + m = re_mod.search(line) + if m: + mods.append(m.group(1)) + m = re_smd.search(line) + if m: + uses.append(m.group(1)) + mods.append('{0}:{1}'.format(m.group(1),m.group(2))) + return (incs, uses, mods) + + def start(self, node): + """ + Start parsing. Use the stack ``self.waiting`` to hold nodes to iterate on + + :param node: fortran file + :type node: :py:class:`waflib.Node.Node` + """ + self.waiting = [node] + while self.waiting: + nd = self.waiting.pop(0) + self.iter(nd) + + def iter(self, node): + """ + Processes a single file during dependency parsing. Extracts files used + modules used and modules provided. 
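+		Include files become dependency nodes directly; modules are recorded as
+		raw names, ``USE@name`` for modules consumed and ``MOD@name`` for modules
+		provided, to be resolved into ``.mod`` files at build time.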
+ """ + incs, uses, mods = self.find_deps(node) + for x in incs: + if x in self.seen: + continue + self.seen.append(x) + self.tryfind_header(x) + + for x in uses: + name = "USE@%s" % x + if not name in self.names: + self.names.append(name) + + for x in mods: + name = "MOD@%s" % x + if not name in self.names: + self.names.append(name) + + def tryfind_header(self, filename): + """ + Adds an include file to the list of nodes to process + + :param filename: file name + :type filename: string + """ + found = None + for n in self.incpaths: + found = n.find_resource(filename) + if found: + self.nodes.append(found) + self.waiting.append(found) + break + if not found: + if not filename in self.names: + self.names.append(filename) + diff --git a/third_party/waf/waflib/Tools/flex.py b/third_party/waf/waflib/Tools/flex.py new file mode 100644 index 0000000..2256657 --- /dev/null +++ b/third_party/waf/waflib/Tools/flex.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +# encoding: utf-8 +# John O'Meara, 2006 +# Thomas Nagy, 2006-2018 (ita) + +""" +The **flex** program is a code generator which creates C or C++ files. +The generated files are compiled into object files. +""" + +import os, re +from waflib import Task, TaskGen +from waflib.Tools import ccroot + +def decide_ext(self, node): + if 'cxx' in self.features: + return ['.lex.cc'] + return ['.lex.c'] + +def flexfun(tsk): + env = tsk.env + bld = tsk.generator.bld + wd = bld.variant_dir + def to_list(xx): + if isinstance(xx, str): + return [xx] + return xx + tsk.last_cmd = lst = [] + lst.extend(to_list(env.FLEX)) + lst.extend(to_list(env.FLEXFLAGS)) + inputs = [a.path_from(tsk.get_cwd()) for a in tsk.inputs] + if env.FLEX_MSYS: + inputs = [x.replace(os.sep, '/') for x in inputs] + lst.extend(inputs) + lst = [x for x in lst if x] + txt = bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) + tsk.outputs[0].write(txt.replace('\r\n', '\n').replace('\r', '\n')) # issue #1207 + +TaskGen.declare_chain( + name = 'flex', + rule = flexfun, # issue #854 + ext_in = '.l', + decider = decide_ext, +) + +# To support the following: +# bld(features='c', flexflags='-P/foo') +Task.classes['flex'].vars = ['FLEXFLAGS', 'FLEX'] +ccroot.USELIB_VARS['c'].add('FLEXFLAGS') +ccroot.USELIB_VARS['cxx'].add('FLEXFLAGS') + +def configure(conf): + """ + Detect the *flex* program + """ + conf.find_program('flex', var='FLEX') + conf.env.FLEXFLAGS = ['-t'] + + if re.search (r"\\msys\\[0-9.]+\\bin\\flex.exe$", conf.env.FLEX[0]): + # this is the flex shipped with MSYS + conf.env.FLEX_MSYS = True + diff --git a/third_party/waf/waflib/Tools/g95.py b/third_party/waf/waflib/Tools/g95.py new file mode 100644 index 0000000..f69ba4f --- /dev/null +++ b/third_party/waf/waflib/Tools/g95.py @@ -0,0 +1,66 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# KWS 2010 +# Thomas Nagy 2016-2018 (ita) + +import re +from waflib import Utils +from waflib.Tools import fc, fc_config, fc_scan, ar +from waflib.Configure import conf + +@conf +def find_g95(conf): + fc = conf.find_program('g95', var='FC') + conf.get_g95_version(fc) + conf.env.FC_NAME = 'G95' + +@conf +def g95_flags(conf): + v = conf.env + v.FCFLAGS_fcshlib = ['-fPIC'] + v.FORTRANMODFLAG = ['-fmod=', ''] # template for module path + v.FCFLAGS_DEBUG = ['-Werror'] # why not + +@conf +def g95_modifier_win32(conf): + fc_config.fortran_modifier_win32(conf) + +@conf +def g95_modifier_cygwin(conf): + fc_config.fortran_modifier_cygwin(conf) + +@conf +def g95_modifier_darwin(conf): + fc_config.fortran_modifier_darwin(conf) + +@conf +def g95_modifier_platform(conf): + dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() + g95_modifier_func = getattr(conf, 'g95_modifier_' + dest_os, None) + if g95_modifier_func: + g95_modifier_func() + +@conf +def get_g95_version(conf, fc): + """get the compiler version""" + + version_re = re.compile(r"g95\s*(?P<major>\d*)\.(?P<minor>\d*)").search + cmd = fc + ['--version'] + out, err = fc_config.getoutput(conf, cmd, stdin=False) + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + conf.fatal('cannot determine g95 version') + k = match.groupdict() + conf.env.FC_VERSION = (k['major'], k['minor']) + +def configure(conf): + conf.find_g95() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.g95_flags() + conf.g95_modifier_platform() + diff --git a/third_party/waf/waflib/Tools/gas.py b/third_party/waf/waflib/Tools/gas.py new file mode 100644 index 0000000..4a8745a --- /dev/null +++ b/third_party/waf/waflib/Tools/gas.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2008-2018 (ita) + +"Detect as/gas/gcc for compiling assembly files" + +import waflib.Tools.asm # - leave this +from waflib.Tools import ar + +def configure(conf): + """ + Find the programs gas/as/gcc and set the variable *AS* + """ + conf.find_program(['gas', 'gcc'], var='AS') + conf.env.AS_TGT_F = ['-c', '-o'] + conf.env.ASLNK_TGT_F = ['-o'] + conf.find_ar() + conf.load('asm') + conf.env.ASM_NAME = 'gas' diff --git a/third_party/waf/waflib/Tools/gcc.py b/third_party/waf/waflib/Tools/gcc.py new file mode 100644 index 0000000..acdd473 --- /dev/null +++ b/third_party/waf/waflib/Tools/gcc.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Ralf Habacker, 2006 (rh) +# Yinon Ehrlich, 2009 + +""" +gcc/llvm detection. 
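+
+Typical usage (a direct load; most projects load ``compiler_c`` instead,
+which tries gcc among other C compilers)::
+
+	def configure(conf):
+		conf.load('gcc')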
+""" + +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +@conf +def find_gcc(conf): + """ + Find the program gcc, and if present, try to detect its version number + """ + cc = conf.find_program(['gcc', 'cc'], var='CC') + conf.get_cc_version(cc, gcc=True) + conf.env.CC_NAME = 'gcc' + +@conf +def gcc_common_flags(conf): + """ + Common flags for gcc on nearly all platforms + """ + v = conf.env + + v.CC_SRC_F = [] + v.CC_TGT_F = ['-c', '-o'] + + if not v.LINK_CC: + v.LINK_CC = v.CC + + v.CCLNK_SRC_F = [] + v.CCLNK_TGT_F = ['-o'] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + v.RPATH_ST = '-Wl,-rpath,%s' + + v.SONAME_ST = '-Wl,-h,%s' + v.SHLIB_MARKER = '-Wl,-Bdynamic' + v.STLIB_MARKER = '-Wl,-Bstatic' + + v.cprogram_PATTERN = '%s' + + v.CFLAGS_cshlib = ['-fPIC'] + v.LINKFLAGS_cshlib = ['-shared'] + v.cshlib_PATTERN = 'lib%s.so' + + v.LINKFLAGS_cstlib = ['-Wl,-Bstatic'] + v.cstlib_PATTERN = 'lib%s.a' + + v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] + v.CFLAGS_MACBUNDLE = ['-fPIC'] + v.macbundle_PATTERN = '%s.bundle' + +@conf +def gcc_modifier_win32(conf): + """Configuration flags for executing gcc on Windows""" + v = conf.env + v.cprogram_PATTERN = '%s.exe' + + v.cshlib_PATTERN = '%s.dll' + v.implib_PATTERN = '%s.dll.a' + v.IMPLIB_ST = '-Wl,--out-implib,%s' + + v.CFLAGS_cshlib = [] + + # Auto-import is enabled by default even without this option, + # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages + # that the linker emits otherwise. + v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) + +@conf +def gcc_modifier_cygwin(conf): + """Configuration flags for executing gcc on Cygwin""" + gcc_modifier_win32(conf) + v = conf.env + v.cshlib_PATTERN = 'cyg%s.dll' + v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base']) + v.CFLAGS_cshlib = [] + +@conf +def gcc_modifier_darwin(conf): + """Configuration flags for executing gcc on MacOS""" + v = conf.env + v.CFLAGS_cshlib = ['-fPIC'] + v.LINKFLAGS_cshlib = ['-dynamiclib'] + v.cshlib_PATTERN = 'lib%s.dylib' + v.FRAMEWORKPATH_ST = '-F%s' + v.FRAMEWORK_ST = ['-framework'] + v.ARCH_ST = ['-arch'] + + v.LINKFLAGS_cstlib = [] + + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + v.SONAME_ST = [] + +@conf +def gcc_modifier_aix(conf): + """Configuration flags for executing gcc on AIX""" + v = conf.env + v.LINKFLAGS_cprogram = ['-Wl,-brtl'] + v.LINKFLAGS_cshlib = ['-shared','-Wl,-brtl,-bexpfull'] + v.SHLIB_MARKER = [] + +@conf +def gcc_modifier_hpux(conf): + v = conf.env + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + v.CFLAGS_cshlib = ['-fPIC','-DPIC'] + v.cshlib_PATTERN = 'lib%s.sl' + +@conf +def gcc_modifier_openbsd(conf): + conf.env.SONAME_ST = [] + +@conf +def gcc_modifier_osf1V(conf): + v = conf.env + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + v.SONAME_ST = [] + +@conf +def gcc_modifier_platform(conf): + """Execute platform-specific functions based on *gcc_modifier_+NAME*""" + # * set configurations specific for a platform. + # * the destination platform is detected automatically by looking at the macros the compiler predefines, + # and if it's not recognised, it fallbacks to sys.platform. 
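+	# e.g. DEST_OS == 'win32' selects gcc_modifier_win32() above; platforms
+	# without a matching gcc_modifier_* function keep the common flags only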
+ gcc_modifier_func = getattr(conf, 'gcc_modifier_' + conf.env.DEST_OS, None) + if gcc_modifier_func: + gcc_modifier_func() + +def configure(conf): + """ + Configuration for gcc + """ + conf.find_gcc() + conf.find_ar() + conf.gcc_common_flags() + conf.gcc_modifier_platform() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() + conf.check_gcc_o_space() + diff --git a/third_party/waf/waflib/Tools/gdc.py b/third_party/waf/waflib/Tools/gdc.py new file mode 100644 index 0000000..d89a66d --- /dev/null +++ b/third_party/waf/waflib/Tools/gdc.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Carlos Rafael Giani, 2007 (dv) + +from waflib.Tools import ar, d +from waflib.Configure import conf + +@conf +def find_gdc(conf): + """ + Finds the program gdc and set the variable *D* + """ + conf.find_program('gdc', var='D') + + out = conf.cmd_and_log(conf.env.D + ['--version']) + if out.find("gdc") == -1: + conf.fatal("detected compiler is not gdc") + +@conf +def common_flags_gdc(conf): + """ + Sets the flags required by *gdc* + """ + v = conf.env + + v.DFLAGS = [] + + v.D_SRC_F = ['-c'] + v.D_TGT_F = '-o%s' + + v.D_LINKER = v.D + v.DLNK_SRC_F = '' + v.DLNK_TGT_F = '-o%s' + v.DINC_ST = '-I%s' + + v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' + v.DSTLIB_ST = v.DSHLIB_ST = '-l%s' + v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L%s' + + v.LINKFLAGS_dshlib = ['-shared'] + + v.DHEADER_ext = '.di' + v.DFLAGS_d_with_header = '-fintfc' + v.D_HDR_F = '-fintfc-file=%s' + +def configure(conf): + """ + Configuration for gdc + """ + conf.find_gdc() + conf.load('ar') + conf.load('d') + conf.common_flags_gdc() + conf.d_platform_flags() + diff --git a/third_party/waf/waflib/Tools/gfortran.py b/third_party/waf/waflib/Tools/gfortran.py new file mode 100644 index 0000000..1050667 --- /dev/null +++ b/third_party/waf/waflib/Tools/gfortran.py @@ -0,0 +1,93 @@ +#! /usr/bin/env python +# encoding: utf-8 +# DC 2008 +# Thomas Nagy 2016-2018 (ita) + +import re +from waflib import Utils +from waflib.Tools import fc, fc_config, fc_scan, ar +from waflib.Configure import conf + +@conf +def find_gfortran(conf): + """Find the gfortran program (will look in the environment variable 'FC')""" + fc = conf.find_program(['gfortran','g77'], var='FC') + # (fallback to g77 for systems, where no gfortran is available) + conf.get_gfortran_version(fc) + conf.env.FC_NAME = 'GFORTRAN' + +@conf +def gfortran_flags(conf): + v = conf.env + v.FCFLAGS_fcshlib = ['-fPIC'] + v.FORTRANMODFLAG = ['-J', ''] # template for module path + v.FCFLAGS_DEBUG = ['-Werror'] # why not + +@conf +def gfortran_modifier_win32(conf): + fc_config.fortran_modifier_win32(conf) + +@conf +def gfortran_modifier_cygwin(conf): + fc_config.fortran_modifier_cygwin(conf) + +@conf +def gfortran_modifier_darwin(conf): + fc_config.fortran_modifier_darwin(conf) + +@conf +def gfortran_modifier_platform(conf): + dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() + gfortran_modifier_func = getattr(conf, 'gfortran_modifier_' + dest_os, None) + if gfortran_modifier_func: + gfortran_modifier_func() + +@conf +def get_gfortran_version(conf, fc): + """Get the compiler version""" + + # ensure this is actually gfortran, not an imposter. 
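+	# (the g77 fallback passes this check too, as g77 also reports itself
+	# as "GNU Fortran")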
+ version_re = re.compile(r"GNU\s*Fortran", re.I).search + cmd = fc + ['--version'] + out, err = fc_config.getoutput(conf, cmd, stdin=False) + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + conf.fatal('Could not determine the compiler type') + + # --- now get more detailed info -- see c_config.get_cc_version + cmd = fc + ['-dM', '-E', '-'] + out, err = fc_config.getoutput(conf, cmd, stdin=True) + + if out.find('__GNUC__') < 0: + conf.fatal('Could not determine the compiler type') + + k = {} + out = out.splitlines() + import shlex + + for line in out: + lst = shlex.split(line) + if len(lst)>2: + key = lst[1] + val = lst[2] + k[key] = val + + def isD(var): + return var in k + + def isT(var): + return var in k and k[var] != '0' + + conf.env.FC_VERSION = (k['__GNUC__'], k['__GNUC_MINOR__'], k['__GNUC_PATCHLEVEL__']) + +def configure(conf): + conf.find_gfortran() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.gfortran_flags() + conf.gfortran_modifier_platform() + conf.check_gfortran_o_space() diff --git a/third_party/waf/waflib/Tools/glib2.py b/third_party/waf/waflib/Tools/glib2.py new file mode 100644 index 0000000..949fe37 --- /dev/null +++ b/third_party/waf/waflib/Tools/glib2.py @@ -0,0 +1,489 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) + +""" +Support for GLib2 tools: + +* marshal +* enums +* gsettings +* gresource +""" + +import os +import functools +from waflib import Context, Task, Utils, Options, Errors, Logs +from waflib.TaskGen import taskgen_method, before_method, feature, extension +from waflib.Configure import conf + +################## marshal files + +@taskgen_method +def add_marshal_file(self, filename, prefix): + """ + Adds a file to the list of marshal files to process. Store them in the attribute *marshal_list*. + + :param filename: xml file to compile + :type filename: string + :param prefix: marshal prefix (--prefix=prefix) + :type prefix: string + """ + if not hasattr(self, 'marshal_list'): + self.marshal_list = [] + self.meths.append('process_marshal') + self.marshal_list.append((filename, prefix)) + +@before_method('process_source') +def process_marshal(self): + """ + Processes the marshal files stored in the attribute *marshal_list* to create :py:class:`waflib.Tools.glib2.glib_genmarshal` instances. + Adds the c file created to the list of source to process. 
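+
+	A minimal sketch (the file, prefix and uselib names are illustrative)::
+
+		tg = bld.program(source='main.c', target='app', use='GLIB GOBJECT')
+		tg.add_marshal_file('marshal.list', 'app_marshal')
+		# -> generates marshal.h/marshal.c; marshal.c is appended to the sources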
+ """ + for f, prefix in getattr(self, 'marshal_list', []): + node = self.path.find_resource(f) + + if not node: + raise Errors.WafError('file not found %r' % f) + + h_node = node.change_ext('.h') + c_node = node.change_ext('.c') + + task = self.create_task('glib_genmarshal', node, [h_node, c_node]) + task.env.GLIB_GENMARSHAL_PREFIX = prefix + self.source = self.to_nodes(getattr(self, 'source', [])) + self.source.append(c_node) + +class glib_genmarshal(Task.Task): + vars = ['GLIB_GENMARSHAL_PREFIX', 'GLIB_GENMARSHAL'] + color = 'BLUE' + ext_out = ['.h'] + def run(self): + bld = self.generator.bld + + get = self.env.get_flat + cmd1 = "%s %s --prefix=%s --header > %s" % ( + get('GLIB_GENMARSHAL'), + self.inputs[0].srcpath(), + get('GLIB_GENMARSHAL_PREFIX'), + self.outputs[0].abspath() + ) + + ret = bld.exec_command(cmd1) + if ret: + return ret + + #print self.outputs[1].abspath() + c = '''#include "%s"\n''' % self.outputs[0].name + self.outputs[1].write(c) + + cmd2 = "%s %s --prefix=%s --body >> %s" % ( + get('GLIB_GENMARSHAL'), + self.inputs[0].srcpath(), + get('GLIB_GENMARSHAL_PREFIX'), + self.outputs[1].abspath() + ) + return bld.exec_command(cmd2) + +########################## glib-mkenums + +@taskgen_method +def add_enums_from_template(self, source='', target='', template='', comments=''): + """ + Adds a file to the list of enum files to process. Stores them in the attribute *enums_list*. + + :param source: enum file to process + :type source: string + :param target: target file + :type target: string + :param template: template file + :type template: string + :param comments: comments + :type comments: string + """ + if not hasattr(self, 'enums_list'): + self.enums_list = [] + self.meths.append('process_enums') + self.enums_list.append({'source': source, + 'target': target, + 'template': template, + 'file-head': '', + 'file-prod': '', + 'file-tail': '', + 'enum-prod': '', + 'value-head': '', + 'value-prod': '', + 'value-tail': '', + 'comments': comments}) + +@taskgen_method +def add_enums(self, source='', target='', + file_head='', file_prod='', file_tail='', enum_prod='', + value_head='', value_prod='', value_tail='', comments=''): + """ + Adds a file to the list of enum files to process. Stores them in the attribute *enums_list*. + + :param source: enum file to process + :type source: string + :param target: target file + :type target: string + :param file_head: unused + :param file_prod: unused + :param file_tail: unused + :param enum_prod: unused + :param value_head: unused + :param value_prod: unused + :param value_tail: unused + :param comments: comments + :type comments: string + """ + if not hasattr(self, 'enums_list'): + self.enums_list = [] + self.meths.append('process_enums') + self.enums_list.append({'source': source, + 'template': '', + 'target': target, + 'file-head': file_head, + 'file-prod': file_prod, + 'file-tail': file_tail, + 'enum-prod': enum_prod, + 'value-head': value_head, + 'value-prod': value_prod, + 'value-tail': value_tail, + 'comments': comments}) + +@before_method('process_source') +def process_enums(self): + """ + Processes the enum files stored in the attribute *enum_list* to create :py:class:`waflib.Tools.glib2.glib_mkenums` instances. 
+ """ + for enum in getattr(self, 'enums_list', []): + task = self.create_task('glib_mkenums') + env = task.env + + inputs = [] + + # process the source + source_list = self.to_list(enum['source']) + if not source_list: + raise Errors.WafError('missing source ' + str(enum)) + source_list = [self.path.find_resource(k) for k in source_list] + inputs += source_list + env.GLIB_MKENUMS_SOURCE = [k.abspath() for k in source_list] + + # find the target + if not enum['target']: + raise Errors.WafError('missing target ' + str(enum)) + tgt_node = self.path.find_or_declare(enum['target']) + if tgt_node.name.endswith('.c'): + self.source.append(tgt_node) + env.GLIB_MKENUMS_TARGET = tgt_node.abspath() + + + options = [] + + if enum['template']: # template, if provided + template_node = self.path.find_resource(enum['template']) + options.append('--template %s' % (template_node.abspath())) + inputs.append(template_node) + params = {'file-head' : '--fhead', + 'file-prod' : '--fprod', + 'file-tail' : '--ftail', + 'enum-prod' : '--eprod', + 'value-head' : '--vhead', + 'value-prod' : '--vprod', + 'value-tail' : '--vtail', + 'comments': '--comments'} + for param, option in params.items(): + if enum[param]: + options.append('%s %r' % (option, enum[param])) + + env.GLIB_MKENUMS_OPTIONS = ' '.join(options) + + # update the task instance + task.set_inputs(inputs) + task.set_outputs(tgt_node) + +class glib_mkenums(Task.Task): + """ + Processes enum files + """ + run_str = '${GLIB_MKENUMS} ${GLIB_MKENUMS_OPTIONS} ${GLIB_MKENUMS_SOURCE} > ${GLIB_MKENUMS_TARGET}' + color = 'PINK' + ext_out = ['.h'] + +######################################### gsettings + +@taskgen_method +def add_settings_schemas(self, filename_list): + """ + Adds settings files to process to *settings_schema_files* + + :param filename_list: files + :type filename_list: list of string + """ + if not hasattr(self, 'settings_schema_files'): + self.settings_schema_files = [] + + if not isinstance(filename_list, list): + filename_list = [filename_list] + + self.settings_schema_files.extend(filename_list) + +@taskgen_method +def add_settings_enums(self, namespace, filename_list): + """ + Called only once by task generator to set the enums namespace. + + :param namespace: namespace + :type namespace: string + :param filename_list: enum files to process + :type filename_list: file list + """ + if hasattr(self, 'settings_enum_namespace'): + raise Errors.WafError("Tried to add gsettings enums to %r more than once" % self.name) + self.settings_enum_namespace = namespace + + if not isinstance(filename_list, list): + filename_list = [filename_list] + self.settings_enum_files = filename_list + +@feature('glib2') +def process_settings(self): + """ + Processes the schema files in *settings_schema_files* to create :py:class:`waflib.Tools.glib2.glib_mkenums` instances. The + same files are validated through :py:class:`waflib.Tools.glib2.glib_validate_schema` tasks. + + """ + enums_tgt_node = [] + install_files = [] + + settings_schema_files = getattr(self, 'settings_schema_files', []) + if settings_schema_files and not self.env.GLIB_COMPILE_SCHEMAS: + raise Errors.WafError ("Unable to process GSettings schemas - glib-compile-schemas was not found during configure") + + # 1. 
process gsettings_enum_files (generate .enums.xml) + # + if hasattr(self, 'settings_enum_files'): + enums_task = self.create_task('glib_mkenums') + + source_list = self.settings_enum_files + source_list = [self.path.find_resource(k) for k in source_list] + enums_task.set_inputs(source_list) + enums_task.env.GLIB_MKENUMS_SOURCE = [k.abspath() for k in source_list] + + target = self.settings_enum_namespace + '.enums.xml' + tgt_node = self.path.find_or_declare(target) + enums_task.set_outputs(tgt_node) + enums_task.env.GLIB_MKENUMS_TARGET = tgt_node.abspath() + enums_tgt_node = [tgt_node] + + install_files.append(tgt_node) + + options = '--comments "<!-- @comment@ -->" --fhead "<schemalist>" --vhead " <@type@ id=\\"%s.@EnumName@\\">" --vprod " <value nick=\\"@valuenick@\\" value=\\"@valuenum@\\"/>" --vtail " </@type@>" --ftail "</schemalist>" ' % (self.settings_enum_namespace) + enums_task.env.GLIB_MKENUMS_OPTIONS = options + + # 2. process gsettings_schema_files (validate .gschema.xml files) + # + for schema in settings_schema_files: + schema_task = self.create_task ('glib_validate_schema') + + schema_node = self.path.find_resource(schema) + if not schema_node: + raise Errors.WafError("Cannot find the schema file %r" % schema) + install_files.append(schema_node) + source_list = enums_tgt_node + [schema_node] + + schema_task.set_inputs (source_list) + schema_task.env.GLIB_COMPILE_SCHEMAS_OPTIONS = [("--schema-file=" + k.abspath()) for k in source_list] + + target_node = schema_node.change_ext('.xml.valid') + schema_task.set_outputs (target_node) + schema_task.env.GLIB_VALIDATE_SCHEMA_OUTPUT = target_node.abspath() + + # 3. schemas install task + def compile_schemas_callback(bld): + if not bld.is_install: + return + compile_schemas = Utils.to_list(bld.env.GLIB_COMPILE_SCHEMAS) + destdir = Options.options.destdir + paths = bld._compile_schemas_registered + if destdir: + paths = (os.path.join(destdir, path.lstrip(os.sep)) for path in paths) + for path in paths: + Logs.pprint('YELLOW', 'Updating GSettings schema cache %r' % path) + if self.bld.exec_command(compile_schemas + [path]): + Logs.warn('Could not update GSettings schema cache %r' % path) + + if self.bld.is_install: + schemadir = self.env.GSETTINGSSCHEMADIR + if not schemadir: + raise Errors.WafError ('GSETTINGSSCHEMADIR not defined (should have been set up automatically during configure)') + + if install_files: + self.add_install_files(install_to=schemadir, install_from=install_files) + registered_schemas = getattr(self.bld, '_compile_schemas_registered', None) + if not registered_schemas: + registered_schemas = self.bld._compile_schemas_registered = set() + self.bld.add_post_fun(compile_schemas_callback) + registered_schemas.add(schemadir) + +class glib_validate_schema(Task.Task): + """ + Validates schema files + """ + run_str = 'rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}' + color = 'PINK' + +################## gresource + +@extension('.gresource.xml') +def process_gresource_source(self, node): + """ + Creates tasks that turn ``.gresource.xml`` files to C code + """ + if not self.env.GLIB_COMPILE_RESOURCES: + raise Errors.WafError ("Unable to process GResource file - glib-compile-resources was not found during configure") + + if 'gresource' in self.features: + return + + h_node = node.change_ext('_xml.h') + c_node = node.change_ext('_xml.c') + self.create_task('glib_gresource_source', node, [h_node, c_node]) + 
self.source.append(c_node) + +@feature('gresource') +def process_gresource_bundle(self): + """ + Creates tasks to turn ``.gresource`` files from ``.gresource.xml`` files:: + + def build(bld): + bld( + features='gresource', + source=['resources1.gresource.xml', 'resources2.gresource.xml'], + install_path='${LIBDIR}/${PACKAGE}' + ) + + :param source: XML files to process + :type source: list of string + :param install_path: installation path + :type install_path: string + """ + for i in self.to_list(self.source): + node = self.path.find_resource(i) + + task = self.create_task('glib_gresource_bundle', node, node.change_ext('')) + inst_to = getattr(self, 'install_path', None) + if inst_to: + self.add_install_files(install_to=inst_to, install_from=task.outputs) + +class glib_gresource_base(Task.Task): + """ + Base class for gresource based tasks + """ + color = 'BLUE' + base_cmd = '${GLIB_COMPILE_RESOURCES} --sourcedir=${SRC[0].parent.srcpath()} --sourcedir=${SRC[0].bld_dir()}' + + def scan(self): + """ + Scans gresource dependencies through ``glib-compile-resources --generate-dependencies command`` + """ + bld = self.generator.bld + kw = {} + kw['cwd'] = self.get_cwd() + kw['quiet'] = Context.BOTH + + cmd = Utils.subst_vars('${GLIB_COMPILE_RESOURCES} --sourcedir=%s --sourcedir=%s --generate-dependencies %s' % ( + self.inputs[0].parent.srcpath(), + self.inputs[0].bld_dir(), + self.inputs[0].bldpath() + ), self.env) + + output = bld.cmd_and_log(cmd, **kw) + + nodes = [] + names = [] + for dep in output.splitlines(): + if dep: + node = bld.bldnode.find_node(dep) + if node: + nodes.append(node) + else: + names.append(dep) + + return (nodes, names) + +class glib_gresource_source(glib_gresource_base): + """ + Task to generate C source code (.h and .c files) from a gresource.xml file + """ + vars = ['GLIB_COMPILE_RESOURCES'] + fun_h = Task.compile_fun_shell(glib_gresource_base.base_cmd + ' --target=${TGT[0].abspath()} --generate-header ${SRC}') + fun_c = Task.compile_fun_shell(glib_gresource_base.base_cmd + ' --target=${TGT[1].abspath()} --generate-source ${SRC}') + ext_out = ['.h'] + + def run(self): + return self.fun_h[0](self) or self.fun_c[0](self) + +class glib_gresource_bundle(glib_gresource_base): + """ + Task to generate a .gresource binary file from a gresource.xml file + """ + run_str = glib_gresource_base.base_cmd + ' --target=${TGT} ${SRC}' + shell = True # temporary workaround for #795 + +@conf +def find_glib_genmarshal(conf): + conf.find_program('glib-genmarshal', var='GLIB_GENMARSHAL') + +@conf +def find_glib_mkenums(conf): + if not conf.env.PERL: + conf.find_program('perl', var='PERL') + conf.find_program('glib-mkenums', interpreter='PERL', var='GLIB_MKENUMS') + +@conf +def find_glib_compile_schemas(conf): + # when cross-compiling, gsettings.m4 locates the program with the following: + # pkg-config --variable glib_compile_schemas gio-2.0 + conf.find_program('glib-compile-schemas', var='GLIB_COMPILE_SCHEMAS') + + def getstr(varname): + return getattr(Options.options, varname, getattr(conf.env,varname, '')) + + gsettingsschemadir = getstr('GSETTINGSSCHEMADIR') + if not gsettingsschemadir: + datadir = getstr('DATADIR') + if not datadir: + prefix = conf.env.PREFIX + datadir = os.path.join(prefix, 'share') + gsettingsschemadir = os.path.join(datadir, 'glib-2.0', 'schemas') + + conf.env.GSETTINGSSCHEMADIR = gsettingsschemadir + +@conf +def find_glib_compile_resources(conf): + conf.find_program('glib-compile-resources', var='GLIB_COMPILE_RESOURCES') + +def configure(conf): + """ + Finds the 
following programs: + + * *glib-genmarshal* and set *GLIB_GENMARSHAL* + * *glib-mkenums* and set *GLIB_MKENUMS* + * *glib-compile-schemas* and set *GLIB_COMPILE_SCHEMAS* (not mandatory) + * *glib-compile-resources* and set *GLIB_COMPILE_RESOURCES* (not mandatory) + """ + conf.find_glib_genmarshal() + conf.find_glib_mkenums() + conf.find_glib_compile_schemas(mandatory=False) + conf.find_glib_compile_resources(mandatory=False) + +def options(opt): + """ + Adds the ``--gsettingsschemadir`` command-line option + """ + gr = opt.add_option_group('Installation directories') + gr.add_option('--gsettingsschemadir', help='GSettings schema location [DATADIR/glib-2.0/schemas]', default='', dest='GSETTINGSSCHEMADIR') + diff --git a/third_party/waf/waflib/Tools/gnu_dirs.py b/third_party/waf/waflib/Tools/gnu_dirs.py new file mode 100644 index 0000000..2847071 --- /dev/null +++ b/third_party/waf/waflib/Tools/gnu_dirs.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Ali Sabil, 2007 + +""" +Sets various standard variables such as INCLUDEDIR. SBINDIR and others. To use this module just call:: + + opt.load('gnu_dirs') + +and:: + + conf.load('gnu_dirs') + +Add options for the standard GNU directories, this tool will add the options +found in autotools, and will update the environment with the following +installation variables: + +============== ========================================= ======================= +Variable Description Default Value +============== ========================================= ======================= +PREFIX installation prefix /usr/local +EXEC_PREFIX installation prefix for binaries PREFIX +BINDIR user commands EXEC_PREFIX/bin +SBINDIR system binaries EXEC_PREFIX/sbin +LIBEXECDIR program-specific binaries EXEC_PREFIX/libexec +SYSCONFDIR host-specific configuration PREFIX/etc +SHAREDSTATEDIR architecture-independent variable data PREFIX/com +LOCALSTATEDIR variable data PREFIX/var +LIBDIR object code libraries EXEC_PREFIX/lib +INCLUDEDIR header files PREFIX/include +OLDINCLUDEDIR header files for non-GCC compilers /usr/include +DATAROOTDIR architecture-independent data root PREFIX/share +DATADIR architecture-independent data DATAROOTDIR +INFODIR GNU "info" documentation DATAROOTDIR/info +LOCALEDIR locale-dependent data DATAROOTDIR/locale +MANDIR manual pages DATAROOTDIR/man +DOCDIR documentation root DATAROOTDIR/doc/APPNAME +HTMLDIR HTML documentation DOCDIR +DVIDIR DVI documentation DOCDIR +PDFDIR PDF documentation DOCDIR +PSDIR PostScript documentation DOCDIR +============== ========================================= ======================= +""" + +import os, re +from waflib import Utils, Options, Context + +gnuopts = ''' +bindir, user commands, ${EXEC_PREFIX}/bin +sbindir, system binaries, ${EXEC_PREFIX}/sbin +libexecdir, program-specific binaries, ${EXEC_PREFIX}/libexec +sysconfdir, host-specific configuration, ${PREFIX}/etc +sharedstatedir, architecture-independent variable data, ${PREFIX}/com +localstatedir, variable data, ${PREFIX}/var +libdir, object code libraries, ${EXEC_PREFIX}/lib%s +includedir, header files, ${PREFIX}/include +oldincludedir, header files for non-GCC compilers, /usr/include +datarootdir, architecture-independent data root, ${PREFIX}/share +datadir, architecture-independent data, ${DATAROOTDIR} +infodir, GNU "info" documentation, ${DATAROOTDIR}/info +localedir, locale-dependent data, ${DATAROOTDIR}/locale +mandir, manual pages, ${DATAROOTDIR}/man +docdir, documentation root, ${DATAROOTDIR}/doc/${PACKAGE} +htmldir, HTML documentation, ${DOCDIR} 
+dvidir, DVI documentation, ${DOCDIR} +pdfdir, PDF documentation, ${DOCDIR} +psdir, PostScript documentation, ${DOCDIR} +''' % Utils.lib64() + +_options = [x.split(', ') for x in gnuopts.splitlines() if x] + +def configure(conf): + """ + Reads the command-line options to set lots of variables in *conf.env*. The variables + BINDIR and LIBDIR will be overwritten. + """ + def get_param(varname, default): + return getattr(Options.options, varname, '') or default + + env = conf.env + env.LIBDIR = env.BINDIR = [] + env.EXEC_PREFIX = get_param('EXEC_PREFIX', env.PREFIX) + env.PACKAGE = getattr(Context.g_module, 'APPNAME', None) or env.PACKAGE + + complete = False + iter = 0 + while not complete and iter < len(_options) + 1: + iter += 1 + complete = True + for name, help, default in _options: + name = name.upper() + if not env[name]: + try: + env[name] = Utils.subst_vars(get_param(name, default).replace('/', os.sep), env) + except TypeError: + complete = False + + if not complete: + lst = [x for x, _, _ in _options if not env[x.upper()]] + raise conf.errors.WafError('Variable substitution failure %r' % lst) + +def options(opt): + """ + Adds lots of command-line options, for example:: + + --exec-prefix: EXEC_PREFIX + """ + inst_dir = opt.add_option_group('Installation prefix', +'By default, "waf install" will put the files in\ + "/usr/local/bin", "/usr/local/lib" etc. An installation prefix other\ + than "/usr/local" can be given using "--prefix", for example "--prefix=$HOME"') + + for k in ('--prefix', '--destdir'): + option = opt.parser.get_option(k) + if option: + opt.parser.remove_option(k) + inst_dir.add_option(option) + + inst_dir.add_option('--exec-prefix', + help = 'installation prefix for binaries [PREFIX]', + default = '', + dest = 'EXEC_PREFIX') + + dirs_options = opt.add_option_group('Installation directories') + + for name, help, default in _options: + option_name = '--' + name + str_default = default + str_help = '%s [%s]' % (help, re.sub(r'\$\{([^}]+)\}', r'\1', str_default)) + dirs_options.add_option(option_name, help=str_help, default='', dest=name.upper()) + diff --git a/third_party/waf/waflib/Tools/gxx.py b/third_party/waf/waflib/Tools/gxx.py new file mode 100644 index 0000000..22c5d26 --- /dev/null +++ b/third_party/waf/waflib/Tools/gxx.py @@ -0,0 +1,157 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Ralf Habacker, 2006 (rh) +# Yinon Ehrlich, 2009 + +""" +g++/llvm detection. 
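+
+A minimal usage sketch (this tool is usually loaded indirectly through
+``compiler_cxx``, which tries g++ among other C++ compilers; loading it
+directly also works)::
+
+	def configure(conf):
+		conf.load('gxx')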
+""" + +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +@conf +def find_gxx(conf): + """ + Finds the program g++, and if present, try to detect its version number + """ + cxx = conf.find_program(['g++', 'c++'], var='CXX') + conf.get_cc_version(cxx, gcc=True) + conf.env.CXX_NAME = 'gcc' + +@conf +def gxx_common_flags(conf): + """ + Common flags for g++ on nearly all platforms + """ + v = conf.env + + v.CXX_SRC_F = [] + v.CXX_TGT_F = ['-c', '-o'] + + if not v.LINK_CXX: + v.LINK_CXX = v.CXX + + v.CXXLNK_SRC_F = [] + v.CXXLNK_TGT_F = ['-o'] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + v.RPATH_ST = '-Wl,-rpath,%s' + + v.SONAME_ST = '-Wl,-h,%s' + v.SHLIB_MARKER = '-Wl,-Bdynamic' + v.STLIB_MARKER = '-Wl,-Bstatic' + + v.cxxprogram_PATTERN = '%s' + + v.CXXFLAGS_cxxshlib = ['-fPIC'] + v.LINKFLAGS_cxxshlib = ['-shared'] + v.cxxshlib_PATTERN = 'lib%s.so' + + v.LINKFLAGS_cxxstlib = ['-Wl,-Bstatic'] + v.cxxstlib_PATTERN = 'lib%s.a' + + v.LINKFLAGS_MACBUNDLE = ['-bundle', '-undefined', 'dynamic_lookup'] + v.CXXFLAGS_MACBUNDLE = ['-fPIC'] + v.macbundle_PATTERN = '%s.bundle' + +@conf +def gxx_modifier_win32(conf): + """Configuration flags for executing gcc on Windows""" + v = conf.env + v.cxxprogram_PATTERN = '%s.exe' + + v.cxxshlib_PATTERN = '%s.dll' + v.implib_PATTERN = '%s.dll.a' + v.IMPLIB_ST = '-Wl,--out-implib,%s' + + v.CXXFLAGS_cxxshlib = [] + + # Auto-import is enabled by default even without this option, + # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages + # that the linker emits otherwise. + v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) + +@conf +def gxx_modifier_cygwin(conf): + """Configuration flags for executing g++ on Cygwin""" + gxx_modifier_win32(conf) + v = conf.env + v.cxxshlib_PATTERN = 'cyg%s.dll' + v.append_value('LINKFLAGS_cxxshlib', ['-Wl,--enable-auto-image-base']) + v.CXXFLAGS_cxxshlib = [] + +@conf +def gxx_modifier_darwin(conf): + """Configuration flags for executing g++ on MacOS""" + v = conf.env + v.CXXFLAGS_cxxshlib = ['-fPIC'] + v.LINKFLAGS_cxxshlib = ['-dynamiclib'] + v.cxxshlib_PATTERN = 'lib%s.dylib' + v.FRAMEWORKPATH_ST = '-F%s' + v.FRAMEWORK_ST = ['-framework'] + v.ARCH_ST = ['-arch'] + + v.LINKFLAGS_cxxstlib = [] + + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + v.SONAME_ST = [] + +@conf +def gxx_modifier_aix(conf): + """Configuration flags for executing g++ on AIX""" + v = conf.env + v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] + + v.LINKFLAGS_cxxshlib = ['-shared', '-Wl,-brtl,-bexpfull'] + v.SHLIB_MARKER = [] + +@conf +def gxx_modifier_hpux(conf): + v = conf.env + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + v.CFLAGS_cxxshlib = ['-fPIC','-DPIC'] + v.cxxshlib_PATTERN = 'lib%s.sl' + +@conf +def gxx_modifier_openbsd(conf): + conf.env.SONAME_ST = [] + +@conf +def gcc_modifier_osf1V(conf): + v = conf.env + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + v.SONAME_ST = [] + +@conf +def gxx_modifier_platform(conf): + """Execute platform-specific functions based on *gxx_modifier_+NAME*""" + # * set configurations specific for a platform. + # * the destination platform is detected automatically by looking at the macros the compiler predefines, + # and if it's not recognised, it fallbacks to sys.platform. 
+	gxx_modifier_func = getattr(conf, 'gxx_modifier_' + conf.env.DEST_OS, None)
+	if gxx_modifier_func:
+		gxx_modifier_func()
+
+def configure(conf):
+	"""
+	Configuration for g++
+	"""
+	conf.find_gxx()
+	conf.find_ar()
+	conf.gxx_common_flags()
+	conf.gxx_modifier_platform()
+	conf.cxx_load_tools()
+	conf.cxx_add_flags()
+	conf.link_add_flags()
+	conf.check_gcc_o_space('cxx')
+
diff --git a/third_party/waf/waflib/Tools/icc.py b/third_party/waf/waflib/Tools/icc.py
new file mode 100644
index 0000000..b6492c8
--- /dev/null
+++ b/third_party/waf/waflib/Tools/icc.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Stian Selnes 2008
+# Thomas Nagy 2009-2018 (ita)
+
+"""
+Detects the Intel C compiler
+"""
+
+import sys
+from waflib.Tools import ccroot, ar, gcc
+from waflib.Configure import conf
+
+@conf
+def find_icc(conf):
+	"""
+	Finds the program icc and executes it to ensure it really is icc
+	"""
+	cc = conf.find_program(['icc', 'ICL'], var='CC')
+	conf.get_cc_version(cc, icc=True)
+	conf.env.CC_NAME = 'icc'
+
+def configure(conf):
+	conf.find_icc()
+	conf.find_ar()
+	conf.gcc_common_flags()
+	conf.gcc_modifier_platform()
+	conf.cc_load_tools()
+	conf.cc_add_flags()
+	conf.link_add_flags()
diff --git a/third_party/waf/waflib/Tools/icpc.py b/third_party/waf/waflib/Tools/icpc.py
new file mode 100644
index 0000000..8a6cc6c
--- /dev/null
+++ b/third_party/waf/waflib/Tools/icpc.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy 2009-2018 (ita)
+
+"""
+Detects the Intel C++ compiler
+"""
+
+import sys
+from waflib.Tools import ccroot, ar, gxx
+from waflib.Configure import conf
+
+@conf
+def find_icpc(conf):
+	"""
+	Finds the program icpc and executes it to ensure it really is icpc
+	"""
+	cxx = conf.find_program('icpc', var='CXX')
+	conf.get_cc_version(cxx, icc=True)
+	conf.env.CXX_NAME = 'icc'
+
+def configure(conf):
+	conf.find_icpc()
+	conf.find_ar()
+	conf.gxx_common_flags()
+	conf.gxx_modifier_platform()
+	conf.cxx_load_tools()
+	conf.cxx_add_flags()
+	conf.link_add_flags()
+
diff --git a/third_party/waf/waflib/Tools/ifort.py b/third_party/waf/waflib/Tools/ifort.py
new file mode 100644
index 0000000..17d3052
--- /dev/null
+++ b/third_party/waf/waflib/Tools/ifort.py
@@ -0,0 +1,413 @@
+#!
/usr/bin/env python +# encoding: utf-8 +# DC 2008 +# Thomas Nagy 2016-2018 (ita) + +import os, re, traceback +from waflib import Utils, Logs, Errors +from waflib.Tools import fc, fc_config, fc_scan, ar, ccroot +from waflib.Configure import conf +from waflib.TaskGen import after_method, feature + +@conf +def find_ifort(conf): + fc = conf.find_program('ifort', var='FC') + conf.get_ifort_version(fc) + conf.env.FC_NAME = 'IFORT' + +@conf +def ifort_modifier_win32(self): + v = self.env + v.IFORT_WIN32 = True + v.FCSTLIB_MARKER = '' + v.FCSHLIB_MARKER = '' + + v.FCLIB_ST = v.FCSTLIB_ST = '%s.lib' + v.FCLIBPATH_ST = v.STLIBPATH_ST = '/LIBPATH:%s' + v.FCINCPATH_ST = '/I%s' + v.FCDEFINES_ST = '/D%s' + + v.fcprogram_PATTERN = v.fcprogram_test_PATTERN = '%s.exe' + v.fcshlib_PATTERN = '%s.dll' + v.fcstlib_PATTERN = v.implib_PATTERN = '%s.lib' + + v.FCLNK_TGT_F = '/out:' + v.FC_TGT_F = ['/c', '/o', ''] + v.FCFLAGS_fcshlib = '' + v.LINKFLAGS_fcshlib = '/DLL' + v.AR_TGT_F = '/out:' + v.IMPLIB_ST = '/IMPLIB:%s' + + v.append_value('LINKFLAGS', '/subsystem:console') + if v.IFORT_MANIFEST: + v.append_value('LINKFLAGS', ['/MANIFEST']) + +@conf +def ifort_modifier_darwin(conf): + fc_config.fortran_modifier_darwin(conf) + +@conf +def ifort_modifier_platform(conf): + dest_os = conf.env.DEST_OS or Utils.unversioned_sys_platform() + ifort_modifier_func = getattr(conf, 'ifort_modifier_' + dest_os, None) + if ifort_modifier_func: + ifort_modifier_func() + +@conf +def get_ifort_version(conf, fc): + """ + Detects the compiler version and sets ``conf.env.FC_VERSION`` + """ + version_re = re.compile(r"\bIntel\b.*\bVersion\s*(?P<major>\d*)\.(?P<minor>\d*)",re.I).search + if Utils.is_win32: + cmd = fc + else: + cmd = fc + ['-logo'] + + out, err = fc_config.getoutput(conf, cmd, stdin=False) + match = version_re(out) or version_re(err) + if not match: + conf.fatal('cannot determine ifort version.') + k = match.groupdict() + conf.env.FC_VERSION = (k['major'], k['minor']) + +def configure(conf): + """ + Detects the Intel Fortran compilers + """ + if Utils.is_win32: + compiler, version, path, includes, libdirs, arch = conf.detect_ifort() + v = conf.env + v.DEST_CPU = arch + v.PATH = path + v.INCLUDES = includes + v.LIBPATH = libdirs + v.MSVC_COMPILER = compiler + try: + v.MSVC_VERSION = float(version) + except ValueError: + v.MSVC_VERSION = float(version[:-3]) + + conf.find_ifort_win32() + conf.ifort_modifier_win32() + else: + conf.find_ifort() + conf.find_program('xiar', var='AR') + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.ifort_modifier_platform() + + +all_ifort_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] +"""List of icl platforms""" + +@conf +def gather_ifort_versions(conf, versions): + """ + List compiler versions by looking up registry keys + """ + version_pattern = re.compile(r'^...?.?\....?.?') + try: + all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\Fortran') + except OSError: + try: + all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\Fortran') + except OSError: + return + index = 0 + while 1: + try: + version = Utils.winreg.EnumKey(all_versions, index) + except OSError: + break + index += 1 + if not version_pattern.match(version): + continue + targets = {} + for target,arch in all_ifort_platforms: + if target=='intel64': + targetDir='EM64T_NATIVE' + else: + targetDir=target + try: + Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) 
+ icl_version=Utils.winreg.OpenKey(all_versions,version) + path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') + except OSError: + pass + else: + batch_file=os.path.join(path,'bin','ifortvars.bat') + if os.path.isfile(batch_file): + targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) + + for target,arch in all_ifort_platforms: + try: + icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) + path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') + except OSError: + continue + else: + batch_file=os.path.join(path,'bin','ifortvars.bat') + if os.path.isfile(batch_file): + targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) + major = version[0:2] + versions['intel ' + major] = targets + +@conf +def setup_ifort(conf, versiondict): + """ + Checks installed compilers and targets and returns the first combination from the user's + options, env, or the global supported lists that checks. + + :param versiondict: dict(platform -> dict(architecture -> configuration)) + :type versiondict: dict(string -> dict(string -> target_compiler) + :return: the compiler, revision, path, include dirs, library paths and target architecture + :rtype: tuple of strings + """ + platforms = Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_ifort_platforms] + desired_versions = conf.env.MSVC_VERSIONS or list(reversed(list(versiondict.keys()))) + for version in desired_versions: + try: + targets = versiondict[version] + except KeyError: + continue + for arch in platforms: + try: + cfg = targets[arch] + except KeyError: + continue + cfg.evaluate() + if cfg.is_valid: + compiler,revision = version.rsplit(' ', 1) + return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu + conf.fatal('ifort: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys()))) + +@conf +def get_ifort_version_win32(conf, compiler, version, target, vcvars): + # FIXME hack + try: + conf.msvc_cnt += 1 + except AttributeError: + conf.msvc_cnt = 1 + batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt) + batfile.write("""@echo off +set INCLUDE= +set LIB= +call "%s" %s +echo PATH=%%PATH%% +echo INCLUDE=%%INCLUDE%% +echo LIB=%%LIB%%;%%LIBPATH%% +""" % (vcvars,target)) + sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()]) + batfile.delete() + lines = sout.splitlines() + + if not lines[0]: + lines.pop(0) + + MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None + for line in lines: + if line.startswith('PATH='): + path = line[5:] + MSVC_PATH = path.split(';') + elif line.startswith('INCLUDE='): + MSVC_INCDIR = [i for i in line[8:].split(';') if i] + elif line.startswith('LIB='): + MSVC_LIBDIR = [i for i in line[4:].split(';') if i] + if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR): + conf.fatal('ifort: Could not find a valid architecture for building (get_ifort_version_win32)') + + # Check if the compiler is usable at all. + # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run. + env = dict(os.environ) + env.update(PATH = path) + compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) + fc = conf.find_program(compiler_name, path_list=MSVC_PATH) + + # delete CL if exists. because it could contain parameters which can change cl's behaviour rather catastrophically. 
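+	# (e.g. CL=/DFOO /W3 would be prepended silently to every cl invocation;
+	# the value shown here is purely illustrative)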
+ if 'CL' in env: + del(env['CL']) + + try: + conf.cmd_and_log(fc + ['/help'], env=env) + except UnicodeError: + st = traceback.format_exc() + if conf.logger: + conf.logger.error(st) + conf.fatal('ifort: Unicode error - check the code page?') + except Exception as e: + Logs.debug('ifort: get_ifort_version: %r %r %r -> failure %s', compiler, version, target, str(e)) + conf.fatal('ifort: cannot run the compiler in get_ifort_version (run with -v to display errors)') + else: + Logs.debug('ifort: get_ifort_version: %r %r %r -> OK', compiler, version, target) + finally: + conf.env[compiler_name] = '' + + return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR) + +class target_compiler(object): + """ + Wraps a compiler configuration; call evaluate() to determine + whether the configuration is usable. + """ + def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): + """ + :param ctx: configuration context to use to eventually get the version environment + :param compiler: compiler name + :param cpu: target cpu + :param version: compiler version number + :param bat_target: ? + :param bat: path to the batch file to run + :param callback: optional function to take the realized environment variables tup and map it (e.g. to combine other constant paths) + """ + self.conf = ctx + self.name = None + self.is_valid = False + self.is_done = False + + self.compiler = compiler + self.cpu = cpu + self.version = version + self.bat_target = bat_target + self.bat = bat + self.callback = callback + + def evaluate(self): + if self.is_done: + return + self.is_done = True + try: + vs = self.conf.get_ifort_version_win32(self.compiler, self.version, self.bat_target, self.bat) + except Errors.ConfigurationError: + self.is_valid = False + return + if self.callback: + vs = self.callback(self, vs) + self.is_valid = True + (self.bindirs, self.incdirs, self.libdirs) = vs + + def __str__(self): + return str((self.bindirs, self.incdirs, self.libdirs)) + + def __repr__(self): + return repr((self.bindirs, self.incdirs, self.libdirs)) + +@conf +def detect_ifort(self): + return self.setup_ifort(self.get_ifort_versions(False)) + +@conf +def get_ifort_versions(self, eval_and_save=True): + """ + :return: platforms to compiler configurations + :rtype: dict + """ + dct = {} + self.gather_ifort_versions(dct) + return dct + +def _get_prog_names(self, compiler): + if compiler=='intel': + compiler_name = 'ifort' + linker_name = 'XILINK' + lib_name = 'XILIB' + else: + # assumes CL.exe + compiler_name = 'CL' + linker_name = 'LINK' + lib_name = 'LIB' + return compiler_name, linker_name, lib_name + +@conf +def find_ifort_win32(conf): + # the autodetection is supposed to be performed before entering in this method + v = conf.env + path = v.PATH + compiler = v.MSVC_COMPILER + version = v.MSVC_VERSION + + compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) + v.IFORT_MANIFEST = (compiler == 'intel' and version >= 11) + + # compiler + fc = conf.find_program(compiler_name, var='FC', path_list=path) + + # before setting anything, check if the compiler is really intel fortran + env = dict(conf.environ) + if path: + env.update(PATH = ';'.join(path)) + if not conf.cmd_and_log(fc + ['/nologo', '/help'], env=env): + conf.fatal('not intel fortran compiler could not be identified') + + v.FC_NAME = 'IFORT' + + if not v.LINK_FC: + conf.find_program(linker_name, var='LINK_FC', path_list=path, mandatory=True) + + if not v.AR: + conf.find_program(lib_name, path_list=path, var='AR', mandatory=True) + v.ARFLAGS = ['/nologo'] + + # 
manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later + if v.IFORT_MANIFEST: + conf.find_program('MT', path_list=path, var='MT') + v.MTFLAGS = ['/nologo'] + + try: + conf.load('winres') + except Errors.WafError: + Logs.warn('Resource compiler not found. Compiling resource file is disabled') + +####################################################################################################### +##### conf above, build below + +@after_method('apply_link') +@feature('fc') +def apply_flags_ifort(self): + """ + Adds additional flags implied by msvc, such as subsystems and pdb files:: + + def build(bld): + bld.stlib(source='main.c', target='bar', subsystem='gruik') + """ + if not self.env.IFORT_WIN32 or not getattr(self, 'link_task', None): + return + + is_static = isinstance(self.link_task, ccroot.stlink_task) + + subsystem = getattr(self, 'subsystem', '') + if subsystem: + subsystem = '/subsystem:%s' % subsystem + flags = is_static and 'ARFLAGS' or 'LINKFLAGS' + self.env.append_value(flags, subsystem) + + if not is_static: + for f in self.env.LINKFLAGS: + d = f.lower() + if d[1:] == 'debug': + pdbnode = self.link_task.outputs[0].change_ext('.pdb') + self.link_task.outputs.append(pdbnode) + + if getattr(self, 'install_task', None): + self.pdb_install_task = self.add_install_files(install_to=self.install_task.install_to, install_from=pdbnode) + + break + +@feature('fcprogram', 'fcshlib', 'fcprogram_test') +@after_method('apply_link') +def apply_manifest_ifort(self): + """ + Enables manifest embedding in Fortran DLLs when using ifort on Windows + See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx + """ + if self.env.IFORT_WIN32 and getattr(self, 'link_task', None): + # it seems ifort.exe cannot be called for linking + self.link_task.env.FC = self.env.LINK_FC + + if self.env.IFORT_WIN32 and self.env.IFORT_MANIFEST and getattr(self, 'link_task', None): + out_node = self.link_task.outputs[0] + man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') + self.link_task.outputs.append(man_node) + self.env.DO_MANIFEST = True + diff --git a/third_party/waf/waflib/Tools/intltool.py b/third_party/waf/waflib/Tools/intltool.py new file mode 100644 index 0000000..af95ba8 --- /dev/null +++ b/third_party/waf/waflib/Tools/intltool.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) + +""" +Support for translation tools such as msgfmt and intltool + +Usage:: + + def configure(conf): + conf.load('gnu_dirs intltool') + + def build(bld): + # process the .po files into .gmo files, and install them in LOCALEDIR + bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") + + # process an input file, substituting the translations from the po dir + bld( + features = "intltool_in", + podir = "../po", + style = "desktop", + flags = ["-u"], + source = 'kupfer.desktop.in', + install_path = "${DATADIR}/applications", + ) + +Usage of the :py:mod:`waflib.Tools.gnu_dirs` is recommended, but not obligatory. 
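+
+When gnu_dirs is not loaded, ``ensure_localedir`` below derives LOCALEDIR from
+DATAROOTDIR or falls back to PREFIX/share/locale; it may also be set by hand
+(the path is illustrative)::
+
+	def configure(conf):
+		conf.load('intltool')
+		conf.env.LOCALEDIR = '/opt/myapp/share/locale'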
+""" + +from __future__ import with_statement + +import os, re +from waflib import Context, Task, Utils, Logs +import waflib.Tools.ccroot +from waflib.TaskGen import feature, before_method, taskgen_method +from waflib.Logs import error +from waflib.Configure import conf + +_style_flags = { + 'ba': '-b', + 'desktop': '-d', + 'keys': '-k', + 'quoted': '--quoted-style', + 'quotedxml': '--quotedxml-style', + 'rfc822deb': '-r', + 'schemas': '-s', + 'xml': '-x', +} + +@taskgen_method +def ensure_localedir(self): + """ + Expands LOCALEDIR from DATAROOTDIR/locale if possible, or falls back to PREFIX/share/locale + """ + # use the tool gnu_dirs to provide options to define this + if not self.env.LOCALEDIR: + if self.env.DATAROOTDIR: + self.env.LOCALEDIR = os.path.join(self.env.DATAROOTDIR, 'locale') + else: + self.env.LOCALEDIR = os.path.join(self.env.PREFIX, 'share', 'locale') + +@before_method('process_source') +@feature('intltool_in') +def apply_intltool_in_f(self): + """ + Creates tasks to translate files by intltool-merge:: + + def build(bld): + bld( + features = "intltool_in", + podir = "../po", + style = "desktop", + flags = ["-u"], + source = 'kupfer.desktop.in', + install_path = "${DATADIR}/applications", + ) + + :param podir: location of the .po files + :type podir: string + :param source: source files to process + :type source: list of string + :param style: the intltool-merge mode of operation, can be one of the following values: + ``ba``, ``desktop``, ``keys``, ``quoted``, ``quotedxml``, ``rfc822deb``, ``schemas`` and ``xml``. + See the ``intltool-merge`` man page for more information about supported modes of operation. + :type style: string + :param flags: compilation flags ("-quc" by default) + :type flags: list of string + :param install_path: installation path + :type install_path: string + """ + try: + self.meths.remove('process_source') + except ValueError: + pass + + self.ensure_localedir() + + podir = getattr(self, 'podir', '.') + podirnode = self.path.find_dir(podir) + if not podirnode: + error("could not find the podir %r" % podir) + return + + cache = getattr(self, 'intlcache', '.intlcache') + self.env.INTLCACHE = [os.path.join(str(self.path.get_bld()), podir, cache)] + self.env.INTLPODIR = podirnode.bldpath() + self.env.append_value('INTLFLAGS', getattr(self, 'flags', self.env.INTLFLAGS_DEFAULT)) + + if '-c' in self.env.INTLFLAGS: + self.bld.fatal('Redundant -c flag in intltool task %r' % self) + + style = getattr(self, 'style', None) + if style: + try: + style_flag = _style_flags[style] + except KeyError: + self.bld.fatal('intltool_in style "%s" is not valid' % style) + + self.env.append_unique('INTLFLAGS', [style_flag]) + + for i in self.to_list(self.source): + node = self.path.find_resource(i) + + task = self.create_task('intltool', node, node.change_ext('')) + inst = getattr(self, 'install_path', None) + if inst: + self.add_install_files(install_to=inst, install_from=task.outputs) + +@feature('intltool_po') +def apply_intltool_po(self): + """ + Creates tasks to process po files:: + + def build(bld): + bld(features='intltool_po', appname='myapp', podir='po', install_path="${LOCALEDIR}") + + The relevant task generator arguments are: + + :param podir: directory of the .po files + :type podir: string + :param appname: name of the application + :type appname: string + :param install_path: installation directory + :type install_path: string + + The file LINGUAS must be present in the directory pointed by *podir* and list the translation files to process. 
+	"""
+	try:
+		self.meths.remove('process_source')
+	except ValueError:
+		pass
+
+	self.ensure_localedir()
+
+	appname = getattr(self, 'appname', getattr(Context.g_module, Context.APPNAME, 'set_your_app_name'))
+	podir = getattr(self, 'podir', '.')
+	inst = getattr(self, 'install_path', '${LOCALEDIR}')
+
+	linguas = self.path.find_node(os.path.join(podir, 'LINGUAS'))
+	if linguas:
+		# scan LINGUAS file for locales to process
+		with open(linguas.abspath()) as f:
+			langs = []
+			for line in f.readlines():
+				# ignore lines containing comments
+				if not line.startswith('#'):
+					langs += line.split()
+			re_linguas = re.compile('[-a-zA-Z_@.]+')
+			for lang in langs:
+				# Make sure that we only process lines which contain locales
+				if re_linguas.match(lang):
+					node = self.path.find_resource(os.path.join(podir, re_linguas.match(lang).group() + '.po'))
+					task = self.create_task('po', node, node.change_ext('.mo'))
+
+					if inst:
+						filename = task.outputs[0].name
+						(langname, ext) = os.path.splitext(filename)
+						inst_file = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + appname + '.mo'
+						self.add_install_as(install_to=inst_file, install_from=task.outputs[0],
+							chmod=getattr(self, 'chmod', Utils.O644))
+
+	else:
+		Logs.pprint('RED', "Error: no LINGUAS file found in the po directory")
+
+class po(Task.Task):
+	"""
+	Compiles .po files into .gmo files
+	"""
+	run_str = '${MSGFMT} -o ${TGT} ${SRC}'
+	color = 'BLUE'
+
+class intltool(Task.Task):
+	"""
+	Calls intltool-merge to update translation files
+	"""
+	run_str = '${INTLTOOL} ${INTLFLAGS} ${INTLCACHE_ST:INTLCACHE} ${INTLPODIR} ${SRC} ${TGT}'
+	color = 'BLUE'
+
+@conf
+def find_msgfmt(conf):
+	"""
+	Detects msgfmt and sets the ``MSGFMT`` variable
+	"""
+	conf.find_program('msgfmt', var='MSGFMT')
+
+@conf
+def find_intltool_merge(conf):
+	"""
+	Detects intltool-merge
+	"""
+	if not conf.env.PERL:
+		conf.find_program('perl', var='PERL')
+	conf.env.INTLCACHE_ST = '--cache=%s'
+	conf.env.INTLFLAGS_DEFAULT = ['-q', '-u']
+	conf.find_program('intltool-merge', interpreter='PERL', var='INTLTOOL')
+
+def configure(conf):
+	"""
+	Detects the program *msgfmt* and sets *conf.env.MSGFMT*.
+	Detects the program *intltool-merge* and sets *conf.env.INTLTOOL*.
+	It is possible to set INTLTOOL in the environment, but it must not have spaces in it::
+
+		$ INTLTOOL="/path/to/the program/intltool" waf configure
+
+	If a C/C++ compiler is present, a compilation test is executed to find the header *locale.h*.
+	"""
+	conf.find_msgfmt()
+	conf.find_intltool_merge()
+	if conf.env.CC or conf.env.CXX:
+		conf.check(header_name='locale.h')
+
diff --git a/third_party/waf/waflib/Tools/irixcc.py b/third_party/waf/waflib/Tools/irixcc.py
new file mode 100644
index 0000000..0335c13
--- /dev/null
+++ b/third_party/waf/waflib/Tools/irixcc.py
@@ -0,0 +1,54 @@
+#!
/usr/bin/env python +# encoding: utf-8 +# imported from samba + +""" +Compiler definition for irix/MIPSpro cc compiler +""" + +from waflib import Errors +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +@conf +def find_irixcc(conf): + v = conf.env + cc = conf.find_program('cc', var='CC') + try: + conf.cmd_and_log(cc + ['-version']) + except Errors.WafError: + conf.fatal('%r -version could not be executed' % cc) + v.CC_NAME = 'irix' + +@conf +def irixcc_common_flags(conf): + v = conf.env + + v.CC_SRC_F = '' + v.CC_TGT_F = ['-c', '-o'] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + if not v.LINK_CC: + v.LINK_CC = v.CC + + v.CCLNK_SRC_F = '' + v.CCLNK_TGT_F = ['-o'] + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + + v.cprogram_PATTERN = '%s' + v.cshlib_PATTERN = 'lib%s.so' + v.cstlib_PATTERN = 'lib%s.a' + +def configure(conf): + conf.find_irixcc() + conf.find_ar() + conf.irixcc_common_flags() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/Tools/javaw.py b/third_party/waf/waflib/Tools/javaw.py new file mode 100644 index 0000000..b7f5dd1 --- /dev/null +++ b/third_party/waf/waflib/Tools/javaw.py @@ -0,0 +1,593 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) + +""" +Java support + +Javac is one of the few compilers that behaves very badly: + +#. it outputs files where it wants to (-d is only for the package root) + +#. it recompiles files silently behind your back + +#. it outputs an undefined amount of files (inner classes) + +Remember that the compilation can be performed using Jython[1] rather than regular Python. Instead of +running one of the following commands:: + + ./waf configure + python waf configure + +You would have to run:: + + java -jar /path/to/jython.jar waf configure + +[1] http://www.jython.org/ + +Usage +===== + +Load the "java" tool. + +def configure(conf): + conf.load('java') + +Java tools will be autodetected and eventually, if present, the quite +standard JAVA_HOME environment variable will be used. The also standard +CLASSPATH variable is used for library searching. + +In configuration phase checks can be done on the system environment, for +example to check if a class is known in the classpath:: + + conf.check_java_class('java.io.FileOutputStream') + +or if the system supports JNI applications building:: + + conf.check_jni_headers() + + +The java tool supports compiling java code, creating jar files and +creating javadoc documentation. This can be either done separately or +together in a single definition. For example to manage them separately:: + + bld(features = 'javac', + srcdir = 'src', + compat = '1.7', + use = 'animals', + name = 'cats-src', + ) + + bld(features = 'jar', + basedir = '.', + destfile = '../cats.jar', + name = 'cats', + use = 'cats-src' + ) + + +Or together by defining all the needed attributes:: + + bld(features = 'javac jar javadoc', + srcdir = 'src/', # folder containing the sources to compile + outdir = 'src', # folder where to output the classes (in the build directory) + compat = '1.6', # java compatibility version number + classpath = ['.', '..'], + + # jar + basedir = 'src', # folder containing the classes and other files to package (must match outdir) + destfile = 'foo.jar', # do not put the destfile in the folder of the java classes! 
+ use = 'NNN', + jaropts = ['-C', 'default/src/', '.'], # can be used to give files + manifest = 'src/Manifest.mf', # Manifest file to include + + # javadoc + javadoc_package = ['com.meow' , 'com.meow.truc.bar', 'com.meow.truc.foo'], + javadoc_output = 'javadoc', + ) + +External jar dependencies can be mapped to a standard waf "use" dependency by +setting an environment variable with a CLASSPATH prefix in the configuration, +for example:: + + conf.env.CLASSPATH_NNN = ['aaaa.jar', 'bbbb.jar'] + +and then NNN can be freely used in rules as:: + + use = 'NNN', + +In the java tool the dependencies via use are not transitive by default, as +this necessity depends on the code. To enable recursive dependency scanning +use on a specific rule: + + recurse_use = True + +Or build-wise by setting RECURSE_JAVA: + + bld.env.RECURSE_JAVA = True + +Unit tests can be integrated in the waf unit test environment using the javatest extra. +""" + +import os, shutil +from waflib import Task, Utils, Errors, Node +from waflib.Configure import conf +from waflib.TaskGen import feature, before_method, after_method, taskgen_method + +from waflib.Tools import ccroot +ccroot.USELIB_VARS['javac'] = set(['CLASSPATH', 'JAVACFLAGS']) + +SOURCE_RE = '**/*.java' +JAR_RE = '**/*' + +class_check_source = ''' +public class Test { + public static void main(String[] argv) { + Class lib; + if (argv.length < 1) { + System.err.println("Missing argument"); + System.exit(77); + } + try { + lib = Class.forName(argv[0]); + } catch (ClassNotFoundException e) { + System.err.println("ClassNotFoundException"); + System.exit(1); + } + lib = null; + System.exit(0); + } +} +''' + +@feature('javac') +@before_method('process_source') +def apply_java(self): + """ + Create a javac task for compiling *.java files*. There can be + only one javac task by task generator. + """ + Utils.def_attrs(self, jarname='', classpath='', + sourcepath='.', srcdir='.', + jar_mf_attributes={}, jar_mf_classpath=[]) + + outdir = getattr(self, 'outdir', None) + if outdir: + if not isinstance(outdir, Node.Node): + outdir = self.path.get_bld().make_node(self.outdir) + else: + outdir = self.path.get_bld() + outdir.mkdir() + self.outdir = outdir + self.env.OUTDIR = outdir.abspath() + + self.javac_task = tsk = self.create_task('javac') + tmp = [] + + srcdir = getattr(self, 'srcdir', '') + if isinstance(srcdir, Node.Node): + srcdir = [srcdir] + for x in Utils.to_list(srcdir): + if isinstance(x, Node.Node): + y = x + else: + y = self.path.find_dir(x) + if not y: + self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) + tmp.append(y) + + tsk.srcdir = tmp + + if getattr(self, 'compat', None): + tsk.env.append_value('JAVACFLAGS', ['-source', str(self.compat)]) + + if hasattr(self, 'sourcepath'): + fold = [isinstance(x, Node.Node) and x or self.path.find_dir(x) for x in self.to_list(self.sourcepath)] + names = os.pathsep.join([x.srcpath() for x in fold]) + else: + names = [x.srcpath() for x in tsk.srcdir] + + if names: + tsk.env.append_value('JAVACFLAGS', ['-sourcepath', names]) + + +@taskgen_method +def java_use_rec(self, name, **kw): + """ + Processes recursively the *use* attribute for each referred java compilation + """ + if name in self.tmp_use_seen: + return + + self.tmp_use_seen.append(name) + + try: + y = self.bld.get_tgen_by_name(name) + except Errors.WafError: + self.uselib.append(name) + return + else: + y.post() + # Add generated JAR name for CLASSPATH. 
Task ordering (set_run_after) + # is already guaranteed by ordering done between the single tasks + if hasattr(y, 'jar_task'): + self.use_lst.append(y.jar_task.outputs[0].abspath()) + else: + if hasattr(y,'outdir'): + self.use_lst.append(y.outdir.abspath()) + else: + self.use_lst.append(y.path.get_bld().abspath()) + + for x in self.to_list(getattr(y, 'use', [])): + self.java_use_rec(x) + +@feature('javac') +@before_method('propagate_uselib_vars') +@after_method('apply_java') +def use_javac_files(self): + """ + Processes the *use* attribute referring to other java compilations + """ + self.use_lst = [] + self.tmp_use_seen = [] + self.uselib = self.to_list(getattr(self, 'uselib', [])) + names = self.to_list(getattr(self, 'use', [])) + get = self.bld.get_tgen_by_name + for x in names: + try: + tg = get(x) + except Errors.WafError: + self.uselib.append(x) + else: + tg.post() + if hasattr(tg, 'jar_task'): + self.use_lst.append(tg.jar_task.outputs[0].abspath()) + self.javac_task.set_run_after(tg.jar_task) + self.javac_task.dep_nodes.extend(tg.jar_task.outputs) + else: + if hasattr(tg, 'outdir'): + base_node = tg.outdir + else: + base_node = tg.path.get_bld() + + self.use_lst.append(base_node.abspath()) + self.javac_task.dep_nodes.extend([dx for dx in base_node.ant_glob(JAR_RE, remove=False, quiet=True)]) + + for tsk in tg.tasks: + self.javac_task.set_run_after(tsk) + + # If recurse use scan is enabled recursively add use attribute for each used one + if getattr(self, 'recurse_use', False) or self.bld.env.RECURSE_JAVA: + self.java_use_rec(x) + + self.env.append_value('CLASSPATH', self.use_lst) + +@feature('javac') +@after_method('apply_java', 'propagate_uselib_vars', 'use_javac_files') +def set_classpath(self): + """ + Sets the CLASSPATH value on the *javac* task previously created. 
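+
+	For example (names and paths are illustrative)::
+
+		bld(features='javac', srcdir='src', classpath=['.', 'libs/helper.jar'])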
+ """ + if getattr(self, 'classpath', None): + self.env.append_unique('CLASSPATH', getattr(self, 'classpath', [])) + for x in self.tasks: + x.env.CLASSPATH = os.pathsep.join(self.env.CLASSPATH) + os.pathsep + +@feature('jar') +@after_method('apply_java', 'use_javac_files') +@before_method('process_source') +def jar_files(self): + """ + Creates a jar task (one maximum per task generator) + """ + destfile = getattr(self, 'destfile', 'test.jar') + jaropts = getattr(self, 'jaropts', []) + manifest = getattr(self, 'manifest', None) + + basedir = getattr(self, 'basedir', None) + if basedir: + if not isinstance(self.basedir, Node.Node): + basedir = self.path.get_bld().make_node(basedir) + else: + basedir = self.path.get_bld() + if not basedir: + self.bld.fatal('Could not find the basedir %r for %r' % (self.basedir, self)) + + self.jar_task = tsk = self.create_task('jar_create') + if manifest: + jarcreate = getattr(self, 'jarcreate', 'cfm') + if not isinstance(manifest,Node.Node): + node = self.path.find_resource(manifest) + else: + node = manifest + if not node: + self.bld.fatal('invalid manifest file %r for %r' % (manifest, self)) + tsk.dep_nodes.append(node) + jaropts.insert(0, node.abspath()) + else: + jarcreate = getattr(self, 'jarcreate', 'cf') + if not isinstance(destfile, Node.Node): + destfile = self.path.find_or_declare(destfile) + if not destfile: + self.bld.fatal('invalid destfile %r for %r' % (destfile, self)) + tsk.set_outputs(destfile) + tsk.basedir = basedir + + jaropts.append('-C') + jaropts.append(basedir.bldpath()) + jaropts.append('.') + + tsk.env.JAROPTS = jaropts + tsk.env.JARCREATE = jarcreate + + if getattr(self, 'javac_task', None): + tsk.set_run_after(self.javac_task) + +@feature('jar') +@after_method('jar_files') +def use_jar_files(self): + """ + Processes the *use* attribute to set the build order on the + tasks created by another task generator. + """ + self.uselib = self.to_list(getattr(self, 'uselib', [])) + names = self.to_list(getattr(self, 'use', [])) + get = self.bld.get_tgen_by_name + for x in names: + try: + y = get(x) + except Errors.WafError: + self.uselib.append(x) + else: + y.post() + self.jar_task.run_after.update(y.tasks) + +class JTask(Task.Task): + """ + Base class for java and jar tasks; provides functionality to run long commands + """ + def split_argfile(self, cmd): + inline = [cmd[0]] + infile = [] + for x in cmd[1:]: + # jar and javac do not want -J flags in @file + if x.startswith('-J'): + inline.append(x) + else: + infile.append(self.quote_flag(x)) + return (inline, infile) + +class jar_create(JTask): + """ + Creates a jar file + """ + color = 'GREEN' + run_str = '${JAR} ${JARCREATE} ${TGT} ${JAROPTS}' + + def runnable_status(self): + """ + Wait for dependent tasks to be executed, then read the + files to update the list of inputs. + """ + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + if not self.inputs: + try: + self.inputs = [x for x in self.basedir.ant_glob(JAR_RE, remove=False, quiet=True) if id(x) != id(self.outputs[0])] + except Exception: + raise Errors.WafError('Could not find the basedir %r for %r' % (self.basedir, self)) + return super(jar_create, self).runnable_status() + +class javac(JTask): + """ + Compiles java files + """ + color = 'BLUE' + run_str = '${JAVAC} -classpath ${CLASSPATH} -d ${OUTDIR} ${JAVACFLAGS} ${SRC}' + vars = ['CLASSPATH', 'JAVACFLAGS', 'JAVAC', 'OUTDIR'] + """ + The javac task will be executed again if the variables CLASSPATH, JAVACFLAGS, JAVAC or OUTDIR change. 
+ """ + def uid(self): + """Identify java tasks by input&output folder""" + lst = [self.__class__.__name__, self.generator.outdir.abspath()] + for x in self.srcdir: + lst.append(x.abspath()) + return Utils.h_list(lst) + + def runnable_status(self): + """ + Waits for dependent tasks to be complete, then read the file system to find the input nodes. + """ + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + + if not self.inputs: + self.inputs = [] + for x in self.srcdir: + if x.exists(): + self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False, quiet=True)) + return super(javac, self).runnable_status() + + def post_run(self): + """ + List class files created + """ + for node in self.generator.outdir.ant_glob('**/*.class', quiet=True): + self.generator.bld.node_sigs[node] = self.uid() + self.generator.bld.task_sigs[self.uid()] = self.cache_sig + +@feature('javadoc') +@after_method('process_rule') +def create_javadoc(self): + """ + Creates a javadoc task (feature 'javadoc') + """ + tsk = self.create_task('javadoc') + tsk.classpath = getattr(self, 'classpath', []) + self.javadoc_package = Utils.to_list(self.javadoc_package) + if not isinstance(self.javadoc_output, Node.Node): + self.javadoc_output = self.bld.path.find_or_declare(self.javadoc_output) + +class javadoc(Task.Task): + """ + Builds java documentation + """ + color = 'BLUE' + + def __str__(self): + return '%s: %s -> %s\n' % (self.__class__.__name__, self.generator.srcdir, self.generator.javadoc_output) + + def run(self): + env = self.env + bld = self.generator.bld + wd = bld.bldnode + + #add src node + bld node (for generated java code) + srcpath = self.generator.path.abspath() + os.sep + self.generator.srcdir + srcpath += os.pathsep + srcpath += self.generator.path.get_bld().abspath() + os.sep + self.generator.srcdir + + classpath = env.CLASSPATH + classpath += os.pathsep + classpath += os.pathsep.join(self.classpath) + classpath = "".join(classpath) + + self.last_cmd = lst = [] + lst.extend(Utils.to_list(env.JAVADOC)) + lst.extend(['-d', self.generator.javadoc_output.abspath()]) + lst.extend(['-sourcepath', srcpath]) + lst.extend(['-classpath', classpath]) + lst.extend(['-subpackages']) + lst.extend(self.generator.javadoc_package) + lst = [x for x in lst if x] + + self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, quiet=0) + + def post_run(self): + nodes = self.generator.javadoc_output.ant_glob('**', quiet=True) + for node in nodes: + self.generator.bld.node_sigs[node] = self.uid() + self.generator.bld.task_sigs[self.uid()] = self.cache_sig + +def configure(self): + """ + Detects the javac, java and jar programs + """ + # If JAVA_PATH is set, we prepend it to the path list + java_path = self.environ['PATH'].split(os.pathsep) + v = self.env + + if 'JAVA_HOME' in self.environ: + java_path = [os.path.join(self.environ['JAVA_HOME'], 'bin')] + java_path + self.env.JAVA_HOME = [self.environ['JAVA_HOME']] + + for x in 'javac java jar javadoc'.split(): + self.find_program(x, var=x.upper(), path_list=java_path, mandatory=(x not in ('javadoc'))) + + if 'CLASSPATH' in self.environ: + v.CLASSPATH = self.environ['CLASSPATH'] + + if not v.JAR: + self.fatal('jar is required for making java packages') + if not v.JAVAC: + self.fatal('javac is required for compiling java classes') + + v.JARCREATE = 'cf' # can use cvf + v.JAVACFLAGS = [] + +@conf +def check_java_class(self, classname, with_classpath=None): + """ + Checks if the specified java class exists + + :param classname: class to check, like java.util.HashMap + 
:type classname: string + :param with_classpath: additional classpath to give + :type with_classpath: string + """ + javatestdir = '.waf-javatest' + + classpath = javatestdir + if self.env.CLASSPATH: + classpath += os.pathsep + self.env.CLASSPATH + if isinstance(with_classpath, str): + classpath += os.pathsep + with_classpath + + shutil.rmtree(javatestdir, True) + os.mkdir(javatestdir) + + Utils.writef(os.path.join(javatestdir, 'Test.java'), class_check_source) + + # Compile the source + self.exec_command(self.env.JAVAC + [os.path.join(javatestdir, 'Test.java')], shell=False) + + # Try to run the app + cmd = self.env.JAVA + ['-cp', classpath, 'Test', classname] + self.to_log("%s\n" % str(cmd)) + found = self.exec_command(cmd, shell=False) + + self.msg('Checking for java class %s' % classname, not found) + + shutil.rmtree(javatestdir, True) + + return found + +@conf +def check_jni_headers(conf): + """ + Checks for jni headers and libraries. On success the conf.env variables xxx_JAVA are added for use in C/C++ targets:: + + def options(opt): + opt.load('compiler_c') + + def configure(conf): + conf.load('compiler_c java') + conf.check_jni_headers() + + def build(bld): + bld.shlib(source='a.c', target='app', use='JAVA') + """ + if not conf.env.CC_NAME and not conf.env.CXX_NAME: + conf.fatal('load a compiler first (gcc, g++, ..)') + + if not conf.env.JAVA_HOME: + conf.fatal('set JAVA_HOME in the system environment') + + # jni requires the jvm + javaHome = conf.env.JAVA_HOME[0] + + dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/include') + if dir is None: + dir = conf.root.find_dir(conf.env.JAVA_HOME[0] + '/../Headers') # think different?! + if dir is None: + conf.fatal('JAVA_HOME does not seem to be set properly') + + f = dir.ant_glob('**/(jni|jni_md).h') + incDirs = [x.parent.abspath() for x in f] + + dir = conf.root.find_dir(conf.env.JAVA_HOME[0]) + f = dir.ant_glob('**/*jvm.(so|dll|dylib)') + libDirs = [x.parent.abspath() for x in f] or [javaHome] + + # On windows, we need both the .dll and .lib to link. On my JDK, they are + # in different directories... 
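+	# (typically jvm.lib near JAVA_HOME/lib while jvm.dll sits under bin/server;
+	# the layout varies by JDK, so this is only an illustration)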
+ f = dir.ant_glob('**/*jvm.(lib)') + if f: + libDirs = [[x, y.parent.abspath()] for x in libDirs for y in f] + + if conf.env.DEST_OS == 'freebsd': + conf.env.append_unique('LINKFLAGS_JAVA', '-pthread') + for d in libDirs: + try: + conf.check(header_name='jni.h', define_name='HAVE_JNI_H', lib='jvm', + libpath=d, includes=incDirs, uselib_store='JAVA', uselib='JAVA') + except Exception: + pass + else: + break + else: + conf.fatal('could not find lib jvm in %r (see config.log)' % libDirs) + diff --git a/third_party/waf/waflib/Tools/ldc2.py b/third_party/waf/waflib/Tools/ldc2.py new file mode 100644 index 0000000..a51c344 --- /dev/null +++ b/third_party/waf/waflib/Tools/ldc2.py @@ -0,0 +1,56 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Alex Rønne Petersen, 2012 (alexrp/Zor) + +from waflib.Tools import ar, d +from waflib.Configure import conf + +@conf +def find_ldc2(conf): + """ + Finds the program *ldc2* and set the variable *D* + """ + conf.find_program(['ldc2'], var='D') + + out = conf.cmd_and_log(conf.env.D + ['-version']) + if out.find("based on DMD v2.") == -1: + conf.fatal("detected compiler is not ldc2") + +@conf +def common_flags_ldc2(conf): + """ + Sets the D flags required by *ldc2* + """ + v = conf.env + + v.D_SRC_F = ['-c'] + v.D_TGT_F = '-of%s' + + v.D_LINKER = v.D + v.DLNK_SRC_F = '' + v.DLNK_TGT_F = '-of%s' + v.DINC_ST = '-I%s' + + v.DSHLIB_MARKER = v.DSTLIB_MARKER = '' + v.DSTLIB_ST = v.DSHLIB_ST = '-L-l%s' + v.DSTLIBPATH_ST = v.DLIBPATH_ST = '-L-L%s' + + v.LINKFLAGS_dshlib = ['-L-shared'] + + v.DHEADER_ext = '.di' + v.DFLAGS_d_with_header = ['-H', '-Hf'] + v.D_HDR_F = '%s' + + v.LINKFLAGS = [] + v.DFLAGS_dshlib = ['-relocation-model=pic'] + +def configure(conf): + """ + Configuration for *ldc2* + """ + conf.find_ldc2() + conf.load('ar') + conf.load('d') + conf.common_flags_ldc2() + conf.d_platform_flags() + diff --git a/third_party/waf/waflib/Tools/lua.py b/third_party/waf/waflib/Tools/lua.py new file mode 100644 index 0000000..15a333a --- /dev/null +++ b/third_party/waf/waflib/Tools/lua.py @@ -0,0 +1,38 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Sebastian Schlingmann, 2008 +# Thomas Nagy, 2008-2018 (ita) + +""" +Lua support. + +Compile *.lua* files into *.luac*:: + + def configure(conf): + conf.load('lua') + conf.env.LUADIR = '/usr/local/share/myapp/scripts/' + def build(bld): + bld(source='foo.lua') +""" + +from waflib.TaskGen import extension +from waflib import Task + +@extension('.lua') +def add_lua(self, node): + tsk = self.create_task('luac', node, node.change_ext('.luac')) + inst_to = getattr(self, 'install_path', self.env.LUADIR and '${LUADIR}' or None) + if inst_to: + self.add_install_files(install_to=inst_to, install_from=tsk.outputs) + return tsk + +class luac(Task.Task): + run_str = '${LUAC} -s -o ${TGT} ${SRC}' + color = 'PINK' + +def configure(conf): + """ + Detect the luac compiler and set *conf.env.LUAC* + """ + conf.find_program('luac', var='LUAC') + diff --git a/third_party/waf/waflib/Tools/md5_tstamp.py b/third_party/waf/waflib/Tools/md5_tstamp.py new file mode 100644 index 0000000..d1569fa --- /dev/null +++ b/third_party/waf/waflib/Tools/md5_tstamp.py @@ -0,0 +1,41 @@ +#! /usr/bin/env python +# encoding: utf-8 + +""" +Re-calculate md5 hashes of files only when the file time have changed:: + + def options(opt): + opt.load('md5_tstamp') + +The hashes can also reflect either the file contents (STRONGEST=True) or the +file time and file size. + +The performance benefits of this module are usually insignificant. 
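+
+A sketch of selecting the weaker (mtime, size) hash instead of hashing the
+file contents::
+
+	from waflib.Tools import md5_tstamp
+	md5_tstamp.STRONGEST = False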
+""" + +import os, stat +from waflib import Utils, Build, Node + +STRONGEST = True + +Build.SAVED_ATTRS.append('hashes_md5_tstamp') +def h_file(self): + filename = self.abspath() + st = os.stat(filename) + + cache = self.ctx.hashes_md5_tstamp + if filename in cache and cache[filename][0] == st.st_mtime: + return cache[filename][1] + + if STRONGEST: + ret = Utils.h_file(filename) + else: + if stat.S_ISDIR(st[stat.ST_MODE]): + raise IOError('Not a file') + ret = Utils.md5(str((st.st_mtime, st.st_size)).encode()).digest() + + cache[filename] = (st.st_mtime, ret) + return ret +h_file.__doc__ = Node.Node.h_file.__doc__ +Node.Node.h_file = h_file + diff --git a/third_party/waf/waflib/Tools/msvc.py b/third_party/waf/waflib/Tools/msvc.py new file mode 100644 index 0000000..d60f670 --- /dev/null +++ b/third_party/waf/waflib/Tools/msvc.py @@ -0,0 +1,1041 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Carlos Rafael Giani, 2006 (dv) +# Tamas Pal, 2007 (folti) +# Nicolas Mercier, 2009 +# Matt Clarkson, 2012 + +""" +Microsoft Visual C++/Intel C++ compiler support + +If you get detection problems, first try any of the following:: + + chcp 65001 + set PYTHONIOENCODING=... + set PYTHONLEGACYWINDOWSSTDIO=1 + +Usage:: + + $ waf configure --msvc_version="msvc 10.0,msvc 9.0" --msvc_target="x64" + +or:: + + def configure(conf): + conf.env.MSVC_VERSIONS = ['msvc 10.0', 'msvc 9.0', 'msvc 8.0', 'msvc 7.1', 'msvc 7.0', 'msvc 6.0', 'wsdk 7.0', 'intel 11', 'PocketPC 9.0', 'Smartphone 8.0'] + conf.env.MSVC_TARGETS = ['x64'] + conf.load('msvc') + +or:: + + def configure(conf): + conf.load('msvc', funs='no_autodetect') + conf.check_lib_msvc('gdi32') + conf.check_libs_msvc('kernel32 user32') + def build(bld): + tg = bld.program(source='main.c', target='app', use='KERNEL32 USER32 GDI32') + +Platforms and targets will be tested in the order they appear; +the first good configuration will be used. + +To force testing all the configurations that are not used, use the ``--no-msvc-lazy`` option +or set ``conf.env.MSVC_LAZY_AUTODETECT=False``. + +Supported platforms: ia64, x64, x86, x86_amd64, x86_ia64, x86_arm, amd64_x86, amd64_arm + +Compilers supported: + +* msvc => Visual Studio, versions 6.0 (VC 98, VC .NET 2002) to 15 (Visual Studio 2017) +* wsdk => Windows SDK, versions 6.0, 6.1, 7.0, 7.1, 8.0 +* icl => Intel compiler, versions 9, 10, 11, 13 +* winphone => Visual Studio to target Windows Phone 8 native (version 8.0 for now) +* Smartphone => Compiler/SDK for Smartphone devices (armv4/v4i) +* PocketPC => Compiler/SDK for PocketPC devices (armv4/v4i) + +To use WAF in a VS2008 Make file project (see http://code.google.com/p/waf/issues/detail?id=894) +You may consider to set the environment variable "VS_UNICODE_OUTPUT" to nothing before calling waf. +So in your project settings use something like 'cmd.exe /C "set VS_UNICODE_OUTPUT=& set PYTHONUNBUFFERED=true & waf build"'. +cmd.exe /C "chcp 1252 & set PYTHONUNBUFFERED=true && set && waf configure" +Setting PYTHONUNBUFFERED gives the unbuffered output. 
+""" + +import os, sys, re, traceback +from waflib import Utils, Logs, Options, Errors +from waflib.TaskGen import after_method, feature + +from waflib.Configure import conf +from waflib.Tools import ccroot, c, cxx, ar + +g_msvc_systemlibs = ''' +aclui activeds ad1 adptif adsiid advapi32 asycfilt authz bhsupp bits bufferoverflowu cabinet +cap certadm certidl ciuuid clusapi comctl32 comdlg32 comsupp comsuppd comsuppw comsuppwd comsvcs +credui crypt32 cryptnet cryptui d3d8thk daouuid dbgeng dbghelp dciman32 ddao35 ddao35d +ddao35u ddao35ud delayimp dhcpcsvc dhcpsapi dlcapi dnsapi dsprop dsuiext dtchelp +faultrep fcachdll fci fdi framedyd framedyn gdi32 gdiplus glauxglu32 gpedit gpmuuid +gtrts32w gtrtst32hlink htmlhelp httpapi icm32 icmui imagehlp imm32 iphlpapi iprop +kernel32 ksguid ksproxy ksuser libcmt libcmtd libcpmt libcpmtd loadperf lz32 mapi +mapi32 mgmtapi minidump mmc mobsync mpr mprapi mqoa mqrt msacm32 mscms mscoree +msdasc msimg32 msrating mstask msvcmrt msvcurt msvcurtd mswsock msxml2 mtx mtxdm +netapi32 nmapinmsupp npptools ntdsapi ntdsbcli ntmsapi ntquery odbc32 odbcbcp +odbccp32 oldnames ole32 oleacc oleaut32 oledb oledlgolepro32 opends60 opengl32 +osptk parser pdh penter pgobootrun pgort powrprof psapi ptrustm ptrustmd ptrustu +ptrustud qosname rasapi32 rasdlg rassapi resutils riched20 rpcndr rpcns4 rpcrt4 rtm +rtutils runtmchk scarddlg scrnsave scrnsavw secur32 sensapi setupapi sfc shell32 +shfolder shlwapi sisbkup snmpapi sporder srclient sti strsafe svcguid tapi32 thunk32 +traffic unicows url urlmon user32 userenv usp10 uuid uxtheme vcomp vcompd vdmdbg +version vfw32 wbemuuid webpost wiaguid wininet winmm winscard winspool winstrm +wintrust wldap32 wmiutils wow32 ws2_32 wsnmp32 wsock32 wst wtsapi32 xaswitch xolehlp +'''.split() +"""importlibs provided by MSVC/Platform SDK. Do NOT search them""" + +all_msvc_platforms = [ ('x64', 'amd64'), ('x86', 'x86'), ('ia64', 'ia64'), + ('x86_amd64', 'amd64'), ('x86_ia64', 'ia64'), ('x86_arm', 'arm'), ('x86_arm64', 'arm64'), + ('amd64_x86', 'x86'), ('amd64_arm', 'arm'), ('amd64_arm64', 'arm64') ] +"""List of msvc platforms""" + +all_wince_platforms = [ ('armv4', 'arm'), ('armv4i', 'arm'), ('mipsii', 'mips'), ('mipsii_fp', 'mips'), ('mipsiv', 'mips'), ('mipsiv_fp', 'mips'), ('sh4', 'sh'), ('x86', 'cex86') ] +"""List of wince platforms""" + +all_icl_platforms = [ ('intel64', 'amd64'), ('em64t', 'amd64'), ('ia32', 'x86'), ('Itanium', 'ia64')] +"""List of icl platforms""" + +def options(opt): + default_ver = '' + vsver = os.getenv('VSCMD_VER') + if vsver: + m = re.match(r'(^\d+\.\d+).*', vsver) + if m: + default_ver = 'msvc %s' % m.group(1) + opt.add_option('--msvc_version', type='string', help = 'msvc version, eg: "msvc 10.0,msvc 9.0"', default=default_ver) + opt.add_option('--msvc_targets', type='string', help = 'msvc targets, eg: "x64,arm"', default='') + opt.add_option('--no-msvc-lazy', action='store_false', help = 'lazily check msvc target environments', default=True, dest='msvc_lazy') + +class MSVCVersion(object): + def __init__(self, ver): + m = re.search(r'^(.*)\s+(\d+[.]\d+)', ver) + if m: + self.name = m.group(1) + self.number = float(m.group(2)) + else: + self.name = ver + self.number = 0. + + def __lt__(self, other): + if self.number == other.number: + return self.name < other.name + return self.number < other.number + +@conf +def setup_msvc(conf, versiondict): + """ + Checks installed compilers and targets and returns the first combination from the user's + options, env, or the global supported lists that checks. 
+
+ :param versiondict: dict(platform -> dict(architecture -> configuration))
+ :type versiondict: dict(string -> dict(string -> target_compiler))
+ :return: the compiler, revision, path, include dirs, library paths and target architecture
+ :rtype: tuple of strings
+ """
+ platforms = getattr(Options.options, 'msvc_targets', '').split(',')
+ if platforms == ['']:
+ platforms=Utils.to_list(conf.env.MSVC_TARGETS) or [i for i,j in all_msvc_platforms+all_icl_platforms+all_wince_platforms]
+ desired_versions = getattr(Options.options, 'msvc_version', '').split(',')
+ if desired_versions == ['']:
+ desired_versions = conf.env.MSVC_VERSIONS or list(sorted(versiondict.keys(), key=MSVCVersion, reverse=True))
+
+ # Override lazy detection by evaluating after the fact.
+ lazy_detect = getattr(Options.options, 'msvc_lazy', True)
+ if conf.env.MSVC_LAZY_AUTODETECT is False:
+ lazy_detect = False
+
+ if not lazy_detect:
+ for val in versiondict.values():
+ for arch in list(val.keys()):
+ cfg = val[arch]
+ cfg.evaluate()
+ if not cfg.is_valid:
+ del val[arch]
+ conf.env.MSVC_INSTALLED_VERSIONS = versiondict
+
+ for version in desired_versions:
+ Logs.debug('msvc: detecting %r - %r', version, desired_versions)
+ try:
+ targets = versiondict[version]
+ except KeyError:
+ continue
+
+ seen = set()
+ for arch in platforms:
+ if arch in seen:
+ continue
+ else:
+ seen.add(arch)
+ try:
+ cfg = targets[arch]
+ except KeyError:
+ continue
+
+ cfg.evaluate()
+ if cfg.is_valid:
+ compiler,revision = version.rsplit(' ', 1)
+ return compiler,revision,cfg.bindirs,cfg.incdirs,cfg.libdirs,cfg.cpu
+ conf.fatal('msvc: Impossible to find a valid architecture for building %r - %r' % (desired_versions, list(versiondict.keys())))
+
+@conf
+def get_msvc_version(conf, compiler, version, target, vcvars):
+ """
+ Checks that an installed compiler actually runs and uses vcvars to obtain the
+ environment needed by the compiler.
+
+ :param compiler: compiler type, for looking up the executable name
+ :param version: compiler version, for debugging only
+ :param target: target architecture
+ :param vcvars: batch file to run to check the environment
+ :return: the location of the compiler executable, the location of include dirs, and the library paths
+ :rtype: tuple of strings
+ """
+ Logs.debug('msvc: get_msvc_version: %r %r %r', compiler, version, target)
+
+ try:
+ conf.msvc_cnt += 1
+ except AttributeError:
+ conf.msvc_cnt = 1
+ batfile = conf.bldnode.make_node('waf-print-msvc-%d.bat' % conf.msvc_cnt)
+ batfile.write("""@echo off
+set INCLUDE=
+set LIB=
+call "%s" %s
+echo PATH=%%PATH%%
+echo INCLUDE=%%INCLUDE%%
+echo LIB=%%LIB%%;%%LIBPATH%%
+""" % (vcvars,target))
+ sout = conf.cmd_and_log(['cmd.exe', '/E:on', '/V:on', '/C', batfile.abspath()], stdin=getattr(Utils.subprocess, 'DEVNULL', None))
+ lines = sout.splitlines()
+
+ if not lines[0]:
+ lines.pop(0)
+
+ MSVC_PATH = MSVC_INCDIR = MSVC_LIBDIR = None
+ for line in lines:
+ if line.startswith('PATH='):
+ path = line[5:]
+ MSVC_PATH = path.split(';')
+ elif line.startswith('INCLUDE='):
+ MSVC_INCDIR = [i for i in line[8:].split(';') if i]
+ elif line.startswith('LIB='):
+ MSVC_LIBDIR = [i for i in line[4:].split(';') if i]
+ if None in (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR):
+ conf.fatal('msvc: Could not find a valid architecture for building (get_msvc_version_3)')
+
+ # Check if the compiler is usable at all.
+ # The detection may return 64-bit versions even on 32-bit systems, and these would fail to run.
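+ # The probe below builds a clean environment from the PATH reported by the
+ # batch file, locates the compiler executable there and runs it once; only
+ # when that run succeeds is this (compiler, version, target) combination kept.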
+ env = dict(os.environ)
+ env.update(PATH = path)
+ compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler)
+ cxx = conf.find_program(compiler_name, path_list=MSVC_PATH)
+
+ # Delete CL if it exists, because it could contain parameters that change cl's behaviour rather catastrophically.
+ if 'CL' in env:
+ del(env['CL'])
+
+ try:
+ conf.cmd_and_log(cxx + ['/help'], env=env)
+ except UnicodeError:
+ st = traceback.format_exc()
+ if conf.logger:
+ conf.logger.error(st)
+ conf.fatal('msvc: Unicode error - check the code page?')
+ except Exception as e:
+ Logs.debug('msvc: get_msvc_version: %r %r %r -> failure %s', compiler, version, target, str(e))
+ conf.fatal('msvc: cannot run the compiler in get_msvc_version (run with -v to display errors)')
+ else:
+ Logs.debug('msvc: get_msvc_version: %r %r %r -> OK', compiler, version, target)
+ finally:
+ conf.env[compiler_name] = ''
+
+ return (MSVC_PATH, MSVC_INCDIR, MSVC_LIBDIR)
+
+def gather_wince_supported_platforms():
+ """
+ Checks Smartphone SDKs
+
+ :return: a list of (device, platforms) tuples for the installed Windows CE SDKs
+ :rtype: list
+ """
+ supported_wince_platforms = []
+ try:
+ ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Windows CE Tools\\SDKs')
+ except OSError:
+ try:
+ ce_sdk = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Windows CE Tools\\SDKs')
+ except OSError:
+ ce_sdk = ''
+ if not ce_sdk:
+ return supported_wince_platforms
+
+ index = 0
+ while 1:
+ try:
+ sdk_device = Utils.winreg.EnumKey(ce_sdk, index)
+ sdk = Utils.winreg.OpenKey(ce_sdk, sdk_device)
+ except OSError:
+ break
+ index += 1
+ try:
+ path,type = Utils.winreg.QueryValueEx(sdk, 'SDKRootDir')
+ except OSError:
+ try:
+ path,type = Utils.winreg.QueryValueEx(sdk,'SDKInformation')
+ except OSError:
+ continue
+ path,xml = os.path.split(path)
+ path = str(path)
+ path,device = os.path.split(path)
+ if not device:
+ path,device = os.path.split(path)
+ platforms = []
+ for arch,compiler in all_wince_platforms:
+ if os.path.isdir(os.path.join(path, device, 'Lib', arch)):
+ platforms.append((arch, compiler, os.path.join(path, device, 'Include', arch), os.path.join(path, device, 'Lib', arch)))
+ if platforms:
+ supported_wince_platforms.append((device, platforms))
+ return supported_wince_platforms
+
+def gather_msvc_detected_versions():
+ # Detect installed MSVC versions by scanning the registry
+ version_pattern = re.compile(r'^(\d\d?\.\d\d?)(Exp)?$')
+ detected_versions = []
+ for vcver,vcvar in (('VCExpress','Exp'), ('VisualStudio','')):
+ prefix = 'SOFTWARE\\Wow6432node\\Microsoft\\' + vcver
+ try:
+ all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
+ except OSError:
+ prefix = 'SOFTWARE\\Microsoft\\' + vcver
+ try:
+ all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, prefix)
+ except OSError:
+ continue
+
+ index = 0
+ while 1:
+ try:
+ version = Utils.winreg.EnumKey(all_versions, index)
+ except OSError:
+ break
+ index += 1
+ match = version_pattern.match(version)
+ if match:
+ versionnumber = float(match.group(1))
+ else:
+ continue
+ detected_versions.append((versionnumber, version+vcvar, prefix+'\\'+version))
+ def fun(tup):
+ return tup[0]
+
+ detected_versions.sort(key = fun)
+ return detected_versions
+
+class target_compiler(object):
+ """
+ Wrap a compiler configuration; call evaluate() to determine
+ whether the configuration is usable.
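+
+ A minimal usage sketch (values are hypothetical; the gatherers below
+ build these objects from registry data)::
+
+ tc = target_compiler(conf, 'msvc', 'amd64', '14.1', 'x64', vcvarsall_path)
+ tc.evaluate()
+ if tc.is_valid:
+ bindirs, incdirs, libdirs = tc.bindirs, tc.incdirs, tc.libdirs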
+ """ + def __init__(self, ctx, compiler, cpu, version, bat_target, bat, callback=None): + """ + :param ctx: configuration context to use to eventually get the version environment + :param compiler: compiler name + :param cpu: target cpu + :param version: compiler version number + :param bat_target: ? + :param bat: path to the batch file to run + """ + self.conf = ctx + self.name = None + self.is_valid = False + self.is_done = False + + self.compiler = compiler + self.cpu = cpu + self.version = version + self.bat_target = bat_target + self.bat = bat + self.callback = callback + + def evaluate(self): + if self.is_done: + return + self.is_done = True + try: + vs = self.conf.get_msvc_version(self.compiler, self.version, self.bat_target, self.bat) + except Errors.ConfigurationError: + self.is_valid = False + return + if self.callback: + vs = self.callback(self, vs) + self.is_valid = True + (self.bindirs, self.incdirs, self.libdirs) = vs + + def __str__(self): + return str((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) + + def __repr__(self): + return repr((self.compiler, self.cpu, self.version, self.bat_target, self.bat)) + +@conf +def gather_wsdk_versions(conf, versions): + """ + Use winreg to add the msvc versions to the input list + + :param versions: list to modify + :type versions: list + """ + version_pattern = re.compile(r'^v..?.?\...?.?') + try: + all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Microsoft\\Microsoft SDKs\\Windows') + except OSError: + try: + all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows') + except OSError: + return + index = 0 + while 1: + try: + version = Utils.winreg.EnumKey(all_versions, index) + except OSError: + break + index += 1 + if not version_pattern.match(version): + continue + try: + msvc_version = Utils.winreg.OpenKey(all_versions, version) + path,type = Utils.winreg.QueryValueEx(msvc_version,'InstallationFolder') + except OSError: + continue + if path and os.path.isfile(os.path.join(path, 'bin', 'SetEnv.cmd')): + targets = {} + for target,arch in all_msvc_platforms: + targets[target] = target_compiler(conf, 'wsdk', arch, version, '/'+target, os.path.join(path, 'bin', 'SetEnv.cmd')) + versions['wsdk ' + version[1:]] = targets + +@conf +def gather_msvc_targets(conf, versions, version, vc_path): + #Looking for normal MSVC compilers! 
+ targets = {} + + if os.path.isfile(os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')): + for target,realtarget in all_msvc_platforms[::-1]: + targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'VC', 'Auxiliary', 'Build', 'vcvarsall.bat')) + elif os.path.isfile(os.path.join(vc_path, 'vcvarsall.bat')): + for target,realtarget in all_msvc_platforms[::-1]: + targets[target] = target_compiler(conf, 'msvc', realtarget, version, target, os.path.join(vc_path, 'vcvarsall.bat')) + elif os.path.isfile(os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')): + targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, 'x86', os.path.join(vc_path, 'Common7', 'Tools', 'vsvars32.bat')) + elif os.path.isfile(os.path.join(vc_path, 'Bin', 'vcvars32.bat')): + targets['x86'] = target_compiler(conf, 'msvc', 'x86', version, '', os.path.join(vc_path, 'Bin', 'vcvars32.bat')) + if targets: + versions['msvc %s' % version] = targets + +@conf +def gather_wince_targets(conf, versions, version, vc_path, vsvars, supported_platforms): + #Looking for Win CE compilers! + for device,platforms in supported_platforms: + targets = {} + for platform,compiler,include,lib in platforms: + winCEpath = os.path.join(vc_path, 'ce') + if not os.path.isdir(winCEpath): + continue + + if os.path.isdir(os.path.join(winCEpath, 'lib', platform)): + bindirs = [os.path.join(winCEpath, 'bin', compiler), os.path.join(winCEpath, 'bin', 'x86_'+compiler)] + incdirs = [os.path.join(winCEpath, 'include'), os.path.join(winCEpath, 'atlmfc', 'include'), include] + libdirs = [os.path.join(winCEpath, 'lib', platform), os.path.join(winCEpath, 'atlmfc', 'lib', platform), lib] + def combine_common(obj, compiler_env): + # TODO this is likely broken, remove in waf 2.1 + (common_bindirs,_1,_2) = compiler_env + return (bindirs + common_bindirs, incdirs, libdirs) + targets[platform] = target_compiler(conf, 'msvc', platform, version, 'x86', vsvars, combine_common) + if targets: + versions[device + ' ' + version] = targets + +@conf +def gather_winphone_targets(conf, versions, version, vc_path, vsvars): + #Looking for WinPhone compilers + targets = {} + for target,realtarget in all_msvc_platforms[::-1]: + targets[target] = target_compiler(conf, 'winphone', realtarget, version, target, vsvars) + if targets: + versions['winphone ' + version] = targets + +@conf +def gather_vswhere_versions(conf, versions): + try: + import json + except ImportError: + Logs.error('Visual Studio 2017 detection requires Python 2.6') + return + + prg_path = os.environ.get('ProgramFiles(x86)', os.environ.get('ProgramFiles', 'C:\\Program Files (x86)')) + + vswhere = os.path.join(prg_path, 'Microsoft Visual Studio', 'Installer', 'vswhere.exe') + args = [vswhere, '-products', '*', '-legacy', '-format', 'json'] + try: + txt = conf.cmd_and_log(args) + except Errors.WafError as e: + Logs.debug('msvc: vswhere.exe failed %s', e) + return + + if sys.version_info[0] < 3: + txt = txt.decode(Utils.console_encoding()) + + arr = json.loads(txt) + arr.sort(key=lambda x: x['installationVersion']) + for entry in arr: + ver = entry['installationVersion'] + ver = str('.'.join(ver.split('.')[:2])) + path = str(os.path.abspath(entry['installationPath'])) + if os.path.exists(path) and ('msvc %s' % ver) not in versions: + conf.gather_msvc_targets(versions, ver, path) + +@conf +def gather_msvc_versions(conf, versions): + vc_paths = [] + for (v,version,reg) in gather_msvc_detected_versions(): + try: + try: + msvc_version = 
Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\VC") + except OSError: + msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, reg + "\\Setup\\Microsoft Visual C++") + path,type = Utils.winreg.QueryValueEx(msvc_version, 'ProductDir') + except OSError: + try: + msvc_version = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\Wow6432node\\Microsoft\\VisualStudio\\SxS\\VS7") + path,type = Utils.winreg.QueryValueEx(msvc_version, version) + except OSError: + continue + else: + vc_paths.append((version, os.path.abspath(str(path)))) + continue + else: + vc_paths.append((version, os.path.abspath(str(path)))) + + wince_supported_platforms = gather_wince_supported_platforms() + + for version,vc_path in vc_paths: + vs_path = os.path.dirname(vc_path) + vsvars = os.path.join(vs_path, 'Common7', 'Tools', 'vsvars32.bat') + if wince_supported_platforms and os.path.isfile(vsvars): + conf.gather_wince_targets(versions, version, vc_path, vsvars, wince_supported_platforms) + + # WP80 works with 11.0Exp and 11.0, both of which resolve to the same vc_path. + # Stop after one is found. + for version,vc_path in vc_paths: + vs_path = os.path.dirname(vc_path) + vsvars = os.path.join(vs_path, 'VC', 'WPSDK', 'WP80', 'vcvarsphoneall.bat') + if os.path.isfile(vsvars): + conf.gather_winphone_targets(versions, '8.0', vc_path, vsvars) + break + + for version,vc_path in vc_paths: + vs_path = os.path.dirname(vc_path) + conf.gather_msvc_targets(versions, version, vc_path) + +@conf +def gather_icl_versions(conf, versions): + """ + Checks ICL compilers + + :param versions: list to modify + :type versions: list + """ + version_pattern = re.compile(r'^...?.?\....?.?') + try: + all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Compilers\\C++') + except OSError: + try: + all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Compilers\\C++') + except OSError: + return + index = 0 + while 1: + try: + version = Utils.winreg.EnumKey(all_versions, index) + except OSError: + break + index += 1 + if not version_pattern.match(version): + continue + targets = {} + for target,arch in all_icl_platforms: + if target=='intel64': + targetDir='EM64T_NATIVE' + else: + targetDir=target + try: + Utils.winreg.OpenKey(all_versions,version+'\\'+targetDir) + icl_version=Utils.winreg.OpenKey(all_versions,version) + path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir') + except OSError: + pass + else: + batch_file=os.path.join(path,'bin','iclvars.bat') + if os.path.isfile(batch_file): + targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) + for target,arch in all_icl_platforms: + try: + icl_version = Utils.winreg.OpenKey(all_versions, version+'\\'+target) + path,type = Utils.winreg.QueryValueEx(icl_version,'ProductDir') + except OSError: + continue + else: + batch_file=os.path.join(path,'bin','iclvars.bat') + if os.path.isfile(batch_file): + targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file) + major = version[0:2] + versions['intel ' + major] = targets + +@conf +def gather_intel_composer_versions(conf, versions): + """ + Checks ICL compilers that are part of Intel Composer Suites + + :param versions: list to modify + :type versions: list + """ + version_pattern = re.compile(r'^...?.?\...?.?.?') + try: + all_versions = Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Wow6432node\\Intel\\Suites') + except OSError: + try: + all_versions = 
Utils.winreg.OpenKey(Utils.winreg.HKEY_LOCAL_MACHINE, 'SOFTWARE\\Intel\\Suites')
+ except OSError:
+ return
+ index = 0
+ while 1:
+ try:
+ version = Utils.winreg.EnumKey(all_versions, index)
+ except OSError:
+ break
+ index += 1
+ if not version_pattern.match(version):
+ continue
+ targets = {}
+ for target,arch in all_icl_platforms:
+ if target=='intel64':
+ targetDir='EM64T_NATIVE'
+ else:
+ targetDir=target
+ try:
+ try:
+ defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\'+targetDir)
+ except OSError:
+ if targetDir == 'EM64T_NATIVE':
+ defaults = Utils.winreg.OpenKey(all_versions,version+'\\Defaults\\C++\\EM64T')
+ else:
+ raise
+ uid,type = Utils.winreg.QueryValueEx(defaults, 'SubKey')
+ Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++\\'+targetDir)
+ icl_version=Utils.winreg.OpenKey(all_versions,version+'\\'+uid+'\\C++')
+ path,type=Utils.winreg.QueryValueEx(icl_version,'ProductDir')
+ except OSError:
+ pass
+ else:
+ batch_file=os.path.join(path,'bin','iclvars.bat')
+ if os.path.isfile(batch_file):
+ targets[target] = target_compiler(conf, 'intel', arch, version, target, batch_file)
+ # The intel compilervar_arch.bat is broken when used with Visual Studio Express 2012
+ # http://software.intel.com/en-us/forums/topic/328487
+ compilervars_warning_attr = '_compilervars_warning_key'
+ if version[0:2] == '13' and getattr(conf, compilervars_warning_attr, True):
+ setattr(conf, compilervars_warning_attr, False)
+ patch_url = 'http://software.intel.com/en-us/forums/topic/328487'
+ compilervars_arch = os.path.join(path, 'bin', 'compilervars_arch.bat')
+ for vscomntool in ('VS110COMNTOOLS', 'VS100COMNTOOLS'):
+ if vscomntool in os.environ:
+ vs_express_path = os.environ[vscomntool] + r'..\IDE\VSWinExpress.exe'
+ dev_env_path = os.environ[vscomntool] + r'..\IDE\devenv.exe'
+ if (r'if exist "%VS110COMNTOOLS%..\IDE\VSWinExpress.exe"' in Utils.readf(compilervars_arch) and
+ not os.path.exists(vs_express_path) and not os.path.exists(dev_env_path)):
+ Logs.warn(('The Intel compilervar_arch.bat only checks for one Visual Studio SKU '
+ '(VSWinExpress.exe) but it does not seem to be installed at %r. '
+ 'The Intel command-line setup will fail to configure unless the file %r '
+ 'is patched. See: %s') % (vs_express_path, compilervars_arch, patch_url))
+ major = version[0:2]
+ versions['intel ' + major] = targets
+
+@conf
+def detect_msvc(self):
+ return self.setup_msvc(self.get_msvc_versions())
+
+@conf
+def get_msvc_versions(self):
+ """
+ :return: platform to compiler configurations
+ :rtype: dict
+ """
+ dct = Utils.ordered_iter_dict()
+ self.gather_icl_versions(dct)
+ self.gather_intel_composer_versions(dct)
+ self.gather_wsdk_versions(dct)
+ self.gather_msvc_versions(dct)
+ self.gather_vswhere_versions(dct)
+ Logs.debug('msvc: detected versions %r', list(dct.keys()))
+ return dct
+
+@conf
+def find_lt_names_msvc(self, libname, is_static=False):
+ """
+ Win32/MSVC-specific code to glean information from libtool .la files.
+ This function is not attached to the task_gen class.
Returns a triplet: + (library absolute path, library name without extension, whether the library is static) + """ + lt_names=[ + 'lib%s.la' % libname, + '%s.la' % libname, + ] + + for path in self.env.LIBPATH: + for la in lt_names: + laf=os.path.join(path,la) + dll=None + if os.path.exists(laf): + ltdict = Utils.read_la_file(laf) + lt_libdir=None + if ltdict.get('libdir', ''): + lt_libdir = ltdict['libdir'] + if not is_static and ltdict.get('library_names', ''): + dllnames=ltdict['library_names'].split() + dll=dllnames[0].lower() + dll=re.sub(r'\.dll$', '', dll) + return (lt_libdir, dll, False) + elif ltdict.get('old_library', ''): + olib=ltdict['old_library'] + if os.path.exists(os.path.join(path,olib)): + return (path, olib, True) + elif lt_libdir != '' and os.path.exists(os.path.join(lt_libdir,olib)): + return (lt_libdir, olib, True) + else: + return (None, olib, True) + else: + raise self.errors.WafError('invalid libtool object file: %s' % laf) + return (None, None, None) + +@conf +def libname_msvc(self, libname, is_static=False): + lib = libname.lower() + lib = re.sub(r'\.lib$','',lib) + + if lib in g_msvc_systemlibs: + return lib + + lib=re.sub('^lib','',lib) + + if lib == 'm': + return None + + (lt_path, lt_libname, lt_static) = self.find_lt_names_msvc(lib, is_static) + + if lt_path != None and lt_libname != None: + if lt_static: + # file existence check has been made by find_lt_names + return os.path.join(lt_path,lt_libname) + + if lt_path != None: + _libpaths = [lt_path] + self.env.LIBPATH + else: + _libpaths = self.env.LIBPATH + + static_libs=[ + 'lib%ss.lib' % lib, + 'lib%s.lib' % lib, + '%ss.lib' % lib, + '%s.lib' %lib, + ] + + dynamic_libs=[ + 'lib%s.dll.lib' % lib, + 'lib%s.dll.a' % lib, + '%s.dll.lib' % lib, + '%s.dll.a' % lib, + 'lib%s_d.lib' % lib, + '%s_d.lib' % lib, + '%s.lib' %lib, + ] + + libnames=static_libs + if not is_static: + libnames=dynamic_libs + static_libs + + for path in _libpaths: + for libn in libnames: + if os.path.exists(os.path.join(path, libn)): + Logs.debug('msvc: lib found: %s', os.path.join(path,libn)) + return re.sub(r'\.lib$', '',libn) + + #if no lib can be found, just return the libname as msvc expects it + self.fatal('The library %r could not be found' % libname) + return re.sub(r'\.lib$', '', libname) + +@conf +def check_lib_msvc(self, libname, is_static=False, uselib_store=None): + """ + Ideally we should be able to place the lib in the right env var, either STLIB or LIB, + but we don't distinguish static libs from shared libs. 
+ This is ok since msvc doesn't have any special linker flag to select static libs (no env.STLIB_MARKER) + """ + libn = self.libname_msvc(libname, is_static) + + if not uselib_store: + uselib_store = libname.upper() + + if False and is_static: # disabled + self.env['STLIB_' + uselib_store] = [libn] + else: + self.env['LIB_' + uselib_store] = [libn] + +@conf +def check_libs_msvc(self, libnames, is_static=False): + for libname in Utils.to_list(libnames): + self.check_lib_msvc(libname, is_static) + +def configure(conf): + """ + Configuration methods to call for detecting msvc + """ + conf.autodetect(True) + conf.find_msvc() + conf.msvc_common_flags() + conf.cc_load_tools() + conf.cxx_load_tools() + conf.cc_add_flags() + conf.cxx_add_flags() + conf.link_add_flags() + conf.visual_studio_add_flags() + +@conf +def no_autodetect(conf): + conf.env.NO_MSVC_DETECT = 1 + configure(conf) + +@conf +def autodetect(conf, arch=False): + v = conf.env + if v.NO_MSVC_DETECT: + return + + compiler, version, path, includes, libdirs, cpu = conf.detect_msvc() + if arch: + v.DEST_CPU = cpu + + v.PATH = path + v.INCLUDES = includes + v.LIBPATH = libdirs + v.MSVC_COMPILER = compiler + try: + v.MSVC_VERSION = float(version) + except ValueError: + v.MSVC_VERSION = float(version[:-3]) + +def _get_prog_names(conf, compiler): + if compiler == 'intel': + compiler_name = 'ICL' + linker_name = 'XILINK' + lib_name = 'XILIB' + else: + # assumes CL.exe + compiler_name = 'CL' + linker_name = 'LINK' + lib_name = 'LIB' + return compiler_name, linker_name, lib_name + +@conf +def find_msvc(conf): + """Due to path format limitations, limit operation only to native Win32. Yeah it sucks.""" + if sys.platform == 'cygwin': + conf.fatal('MSVC module does not work under cygwin Python!') + + # the autodetection is supposed to be performed before entering in this method + v = conf.env + path = v.PATH + compiler = v.MSVC_COMPILER + version = v.MSVC_VERSION + + compiler_name, linker_name, lib_name = _get_prog_names(conf, compiler) + v.MSVC_MANIFEST = (compiler == 'msvc' and version >= 8) or (compiler == 'wsdk' and version >= 6) or (compiler == 'intel' and version >= 11) + + # compiler + cxx = conf.find_program(compiler_name, var='CXX', path_list=path) + + # before setting anything, check if the compiler is really msvc + env = dict(conf.environ) + if path: + env.update(PATH = ';'.join(path)) + if not conf.cmd_and_log(cxx + ['/nologo', '/help'], env=env): + conf.fatal('the msvc compiler could not be identified') + + # c/c++ compiler + v.CC = v.CXX = cxx + v.CC_NAME = v.CXX_NAME = 'msvc' + + # linker + if not v.LINK_CXX: + conf.find_program(linker_name, path_list=path, errmsg='%s was not found (linker)' % linker_name, var='LINK_CXX') + + if not v.LINK_CC: + v.LINK_CC = v.LINK_CXX + + # staticlib linker + if not v.AR: + stliblink = conf.find_program(lib_name, path_list=path, var='AR') + if not stliblink: + return + v.ARFLAGS = ['/nologo'] + + # manifest tool. Not required for VS 2003 and below. Must have for VS 2005 and later + if v.MSVC_MANIFEST: + conf.find_program('MT', path_list=path, var='MT') + v.MTFLAGS = ['/nologo'] + + try: + conf.load('winres') + except Errors.ConfigurationError: + Logs.warn('Resource compiler not found. 
Compiling resource file is disabled') + +@conf +def visual_studio_add_flags(self): + """visual studio flags found in the system environment""" + v = self.env + if self.environ.get('INCLUDE'): + v.prepend_value('INCLUDES', [x for x in self.environ['INCLUDE'].split(';') if x]) # notice the 'S' + if self.environ.get('LIB'): + v.prepend_value('LIBPATH', [x for x in self.environ['LIB'].split(';') if x]) + +@conf +def msvc_common_flags(conf): + """ + Setup the flags required for executing the msvc compiler + """ + v = conf.env + + v.DEST_BINFMT = 'pe' + v.append_value('CFLAGS', ['/nologo']) + v.append_value('CXXFLAGS', ['/nologo']) + v.append_value('LINKFLAGS', ['/nologo']) + v.DEFINES_ST = '/D%s' + + v.CC_SRC_F = '' + v.CC_TGT_F = ['/c', '/Fo'] + v.CXX_SRC_F = '' + v.CXX_TGT_F = ['/c', '/Fo'] + + if (v.MSVC_COMPILER == 'msvc' and v.MSVC_VERSION >= 8) or (v.MSVC_COMPILER == 'wsdk' and v.MSVC_VERSION >= 6): + v.CC_TGT_F = ['/FC'] + v.CC_TGT_F + v.CXX_TGT_F = ['/FC'] + v.CXX_TGT_F + + v.CPPPATH_ST = '/I%s' # template for adding include paths + + v.AR_TGT_F = v.CCLNK_TGT_F = v.CXXLNK_TGT_F = '/OUT:' + + # CRT specific flags + v.CFLAGS_CRT_MULTITHREADED = v.CXXFLAGS_CRT_MULTITHREADED = ['/MT'] + v.CFLAGS_CRT_MULTITHREADED_DLL = v.CXXFLAGS_CRT_MULTITHREADED_DLL = ['/MD'] + + v.CFLAGS_CRT_MULTITHREADED_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DBG = ['/MTd'] + v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = ['/MDd'] + + v.LIB_ST = '%s.lib' + v.LIBPATH_ST = '/LIBPATH:%s' + v.STLIB_ST = '%s.lib' + v.STLIBPATH_ST = '/LIBPATH:%s' + + if v.MSVC_MANIFEST: + v.append_value('LINKFLAGS', ['/MANIFEST']) + + v.CFLAGS_cshlib = [] + v.CXXFLAGS_cxxshlib = [] + v.LINKFLAGS_cshlib = v.LINKFLAGS_cxxshlib = ['/DLL'] + v.cshlib_PATTERN = v.cxxshlib_PATTERN = '%s.dll' + v.implib_PATTERN = '%s.lib' + v.IMPLIB_ST = '/IMPLIB:%s' + + v.LINKFLAGS_cstlib = [] + v.cstlib_PATTERN = v.cxxstlib_PATTERN = '%s.lib' + + v.cprogram_PATTERN = v.cxxprogram_PATTERN = '%s.exe' + + v.def_PATTERN = '/def:%s' + + +####################################################################################################### +##### conf above, build below + +@after_method('apply_link') +@feature('c', 'cxx') +def apply_flags_msvc(self): + """ + Add additional flags implied by msvc, such as subsystems and pdb files:: + + def build(bld): + bld.stlib(source='main.c', target='bar', subsystem='gruik') + """ + if self.env.CC_NAME != 'msvc' or not getattr(self, 'link_task', None): + return + + is_static = isinstance(self.link_task, ccroot.stlink_task) + + subsystem = getattr(self, 'subsystem', '') + if subsystem: + subsystem = '/subsystem:%s' % subsystem + flags = is_static and 'ARFLAGS' or 'LINKFLAGS' + self.env.append_value(flags, subsystem) + + if not is_static: + for f in self.env.LINKFLAGS: + d = f.lower() + if d[1:] in ('debug', 'debug:full', 'debug:fastlink'): + pdbnode = self.link_task.outputs[0].change_ext('.pdb') + self.link_task.outputs.append(pdbnode) + + if getattr(self, 'install_task', None): + self.pdb_install_task = self.add_install_files( + install_to=self.install_task.install_to, install_from=pdbnode) + break + +@feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib') +@after_method('apply_link') +def apply_manifest(self): + """ + Special linker for MSVC with support for embedding manifests into DLL's + and executables compiled by Visual Studio 2005 or probably later. Without + the manifest file, the binaries are unusable. 
+ See: http://msdn2.microsoft.com/en-us/library/ms235542(VS.80).aspx + """ + if self.env.CC_NAME == 'msvc' and self.env.MSVC_MANIFEST and getattr(self, 'link_task', None): + out_node = self.link_task.outputs[0] + man_node = out_node.parent.find_or_declare(out_node.name + '.manifest') + self.link_task.outputs.append(man_node) + self.env.DO_MANIFEST = True + +def make_winapp(self, family): + append = self.env.append_unique + append('DEFINES', 'WINAPI_FAMILY=%s' % family) + append('CXXFLAGS', ['/ZW', '/TP']) + for lib_path in self.env.LIBPATH: + append('CXXFLAGS','/AI%s'%lib_path) + +@feature('winphoneapp') +@after_method('process_use') +@after_method('propagate_uselib_vars') +def make_winphone_app(self): + """ + Insert configuration flags for windows phone applications (adds /ZW, /TP...) + """ + make_winapp(self, 'WINAPI_FAMILY_PHONE_APP') + self.env.append_unique('LINKFLAGS', ['/NODEFAULTLIB:ole32.lib', 'PhoneAppModelHost.lib']) + +@feature('winapp') +@after_method('process_use') +@after_method('propagate_uselib_vars') +def make_windows_app(self): + """ + Insert configuration flags for windows applications (adds /ZW, /TP...) + """ + make_winapp(self, 'WINAPI_FAMILY_DESKTOP_APP') diff --git a/third_party/waf/waflib/Tools/nasm.py b/third_party/waf/waflib/Tools/nasm.py new file mode 100644 index 0000000..9c51c18 --- /dev/null +++ b/third_party/waf/waflib/Tools/nasm.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2008-2018 (ita) + +""" +Nasm tool (asm processing) +""" + +import os +import waflib.Tools.asm # leave this +from waflib.TaskGen import feature + +@feature('asm') +def apply_nasm_vars(self): + """provided for compatibility""" + self.env.append_value('ASFLAGS', self.to_list(getattr(self, 'nasm_flags', []))) + +def configure(conf): + """ + Detect nasm/yasm and set the variable *AS* + """ + conf.find_program(['nasm', 'yasm'], var='AS') + conf.env.AS_TGT_F = ['-o'] + conf.env.ASLNK_TGT_F = ['-o'] + conf.load('asm') + conf.env.ASMPATH_ST = '-I%s' + os.sep + txt = conf.cmd_and_log(conf.env.AS + ['--version']) + if 'yasm' in txt.lower(): + conf.env.ASM_NAME = 'yasm' + else: + conf.env.ASM_NAME = 'nasm' diff --git a/third_party/waf/waflib/Tools/nobuild.py b/third_party/waf/waflib/Tools/nobuild.py new file mode 100644 index 0000000..2e4b055 --- /dev/null +++ b/third_party/waf/waflib/Tools/nobuild.py @@ -0,0 +1,24 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2015 (ita) + +""" +Override the build commands to write empty files. +This is useful for profiling and evaluating the Python overhead. + +To use:: + + def build(bld): + ... + bld.load('nobuild') + +""" + +from waflib import Task +def build(bld): + def run(self): + for x in self.outputs: + x.write('') + for (name, cls) in Task.classes.items(): + cls.run = run + diff --git a/third_party/waf/waflib/Tools/perl.py b/third_party/waf/waflib/Tools/perl.py new file mode 100644 index 0000000..32b03fb --- /dev/null +++ b/third_party/waf/waflib/Tools/perl.py @@ -0,0 +1,156 @@ +#!/usr/bin/env python +# encoding: utf-8 +# andersg at 0x63.nu 2007 +# Thomas Nagy 2016-2018 (ita) + +""" +Support for Perl extensions. 
A C/C++ compiler is required::
+
+ def options(opt):
+ opt.load('compiler_c perl')
+ def configure(conf):
+ conf.load('compiler_c perl')
+ conf.check_perl_version((5,6,0))
+ conf.check_perl_ext_devel()
+ conf.check_perl_module('Cairo')
+ conf.check_perl_module('Devel::PPPort 4.89')
+ def build(bld):
+ bld(
+ features = 'c cshlib perlext',
+ source = 'Mytest.xs',
+ target = 'Mytest',
+ install_path = '${ARCHDIR_PERL}/auto')
+ bld.install_files('${ARCHDIR_PERL}', 'Mytest.pm')
+"""
+
+import os
+from waflib import Task, Options, Utils, Errors
+from waflib.Configure import conf
+from waflib.TaskGen import extension, feature, before_method
+
+@before_method('apply_incpaths', 'apply_link', 'propagate_uselib_vars')
+@feature('perlext')
+def init_perlext(self):
+ """
+ Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the
+ *lib* prefix from library names.
+ """
+ self.uselib = self.to_list(getattr(self, 'uselib', []))
+ if not 'PERLEXT' in self.uselib:
+ self.uselib.append('PERLEXT')
+ self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.perlext_PATTERN
+
+@extension('.xs')
+def xsubpp_file(self, node):
+ """
+ Create :py:class:`waflib.Tools.perl.xsubpp` tasks to process *.xs* files
+ """
+ outnode = node.change_ext('.c')
+ self.create_task('xsubpp', node, outnode)
+ self.source.append(outnode)
+
+class xsubpp(Task.Task):
+ """
+ Process *.xs* files
+ """
+ run_str = '${PERL} ${XSUBPP} -noprototypes -typemap ${EXTUTILS_TYPEMAP} ${SRC} > ${TGT}'
+ color = 'BLUE'
+ ext_out = ['.h']
+
+@conf
+def check_perl_version(self, minver=None):
+ """
+ Check if Perl is installed and set the variable PERL.
+ minver should be a tuple such as (5,6,0).
+ """
+ res = True
+ if minver:
+ cver = '.'.join(map(str,minver))
+ else:
+ cver = ''
+
+ self.start_msg('Checking for minimum perl version %s' % cver)
+
+ perl = self.find_program('perl', var='PERL', value=getattr(Options.options, 'perlbinary', None))
+ version = self.cmd_and_log(perl + ["-e", 'printf \"%vd\", $^V'])
+ if not version:
+ res = False
+ version = "Unknown"
+ elif not minver is None:
+ ver = tuple(map(int, version.split(".")))
+ if ver < minver:
+ res = False
+
+ self.end_msg(version, color=res and 'GREEN' or 'YELLOW')
+ return res
+
+@conf
+def check_perl_module(self, module):
+ """
+ Check if the specified perl module is installed.
+
+ A minimum version can be given by appending it to the module name,
+ like this::
+
+ def configure(conf):
+ conf.check_perl_module("Some::Module 2.92")
+ """
+ cmd = self.env.PERL + ['-e', 'use %s' % module]
+ self.start_msg('perl module %s' % module)
+ try:
+ r = self.cmd_and_log(cmd)
+ except Errors.WafError:
+ self.end_msg(False)
+ return None
+ self.end_msg(r or True)
+ return r
+
+@conf
+def check_perl_ext_devel(self):
+ """
+ Check for configuration needed to build perl extensions.
+
+ Sets different xxx_PERLEXT variables in the environment.
+
+ Also sets the ARCHDIR_PERL variable, useful as an installation path,
+ which can be overridden with the ``--with-perl-archdir`` option.
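+
+ The variables set below include LINKFLAGS_PERLEXT, INCLUDES_PERLEXT,
+ CFLAGS_PERLEXT, EXTUTILS_TYPEMAP, XSUBPP and perlext_PATTERN.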
+ """ + + env = self.env + perl = env.PERL + if not perl: + self.fatal('find perl first') + + def cmd_perl_config(s): + return perl + ['-MConfig', '-e', 'print \"%s\"' % s] + def cfg_str(cfg): + return self.cmd_and_log(cmd_perl_config(cfg)) + def cfg_lst(cfg): + return Utils.to_list(cfg_str(cfg)) + def find_xsubpp(): + for var in ('privlib', 'vendorlib'): + xsubpp = cfg_lst('$Config{%s}/ExtUtils/xsubpp$Config{exe_ext}' % var) + if xsubpp and os.path.isfile(xsubpp[0]): + return xsubpp + return self.find_program('xsubpp') + + env.LINKFLAGS_PERLEXT = cfg_lst('$Config{lddlflags}') + env.INCLUDES_PERLEXT = cfg_lst('$Config{archlib}/CORE') + env.CFLAGS_PERLEXT = cfg_lst('$Config{ccflags} $Config{cccdlflags}') + env.EXTUTILS_TYPEMAP = cfg_lst('$Config{privlib}/ExtUtils/typemap') + env.XSUBPP = find_xsubpp() + + if not getattr(Options.options, 'perlarchdir', None): + env.ARCHDIR_PERL = cfg_str('$Config{sitearch}') + else: + env.ARCHDIR_PERL = getattr(Options.options, 'perlarchdir') + + env.perlext_PATTERN = '%s.' + cfg_str('$Config{dlext}') + +def options(opt): + """ + Add the ``--with-perl-archdir`` and ``--with-perl-binary`` command-line options. + """ + opt.add_option('--with-perl-binary', type='string', dest='perlbinary', help = 'Specify alternate perl binary', default=None) + opt.add_option('--with-perl-archdir', type='string', dest='perlarchdir', help = 'Specify directory where to install arch specific files', default=None) + diff --git a/third_party/waf/waflib/Tools/python.py b/third_party/waf/waflib/Tools/python.py new file mode 100644 index 0000000..b2dd1a9 --- /dev/null +++ b/third_party/waf/waflib/Tools/python.py @@ -0,0 +1,657 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2007-2015 (ita) +# Gustavo Carneiro (gjc), 2007 + +""" +Support for Python, detect the headers and libraries and provide +*use* variables to link C/C++ programs against them:: + + def options(opt): + opt.load('compiler_c python') + def configure(conf): + conf.load('compiler_c python') + conf.check_python_version((2,4,2)) + conf.check_python_headers() + def build(bld): + bld.program(features='pyembed', source='a.c', target='myprog') + bld.shlib(features='pyext', source='b.c', target='mylib') +""" + +import os, sys +from waflib import Errors, Logs, Node, Options, Task, Utils +from waflib.TaskGen import extension, before_method, after_method, feature +from waflib.Configure import conf + +FRAG = ''' +#include <Python.h> +#ifdef __cplusplus +extern "C" { +#endif + void Py_Initialize(void); + void Py_Finalize(void); +#ifdef __cplusplus +} +#endif +int main(int argc, char **argv) +{ + (void)argc; (void)argv; + Py_Initialize(); + Py_Finalize(); + return 0; +} +''' +""" +Piece of C/C++ code used in :py:func:`waflib.Tools.python.check_python_headers` +""" + +INST = ''' +import sys, py_compile +py_compile.compile(sys.argv[1], sys.argv[2], sys.argv[3], True) +''' +""" +Piece of Python code used in :py:class:`waflib.Tools.python.pyo` and :py:class:`waflib.Tools.python.pyc` for byte-compiling python files +""" + +DISTUTILS_IMP = """ +try: + from distutils.sysconfig import get_config_var, get_python_lib +except ImportError: + from sysconfig import get_config_var, get_path + def get_python_lib(*k, **kw): + keyword='platlib' if kw.get('plat_specific') else 'purelib' + if 'prefix' in kw: + return get_path(keyword, vars={'installed_base': kw['prefix'], 'platbase': kw['prefix']}) + return get_path(keyword) +""".splitlines() + +@before_method('process_source') +@feature('py') +def feature_py(self): + """ + Create tasks to 
byte-compile .py files and install them, if requested + """ + self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') + install_from = getattr(self, 'install_from', None) + if install_from and not isinstance(install_from, Node.Node): + install_from = self.path.find_dir(install_from) + self.install_from = install_from + + ver = self.env.PYTHON_VERSION + if not ver: + self.bld.fatal('Installing python files requires PYTHON_VERSION, try conf.check_python_version') + + if int(ver.replace('.', '')) > 31: + self.install_32 = True + +@extension('.py') +def process_py(self, node): + """ + Add signature of .py file, so it will be byte-compiled when necessary + """ + assert(hasattr(self, 'install_path')), 'add features="py" for target "%s" in "%s/wscript".' % (self.target, self.path.nice_path()) + self.install_from = getattr(self, 'install_from', None) + relative_trick = getattr(self, 'relative_trick', True) + if self.install_from: + assert isinstance(self.install_from, Node.Node), \ + 'add features="py" for target "%s" in "%s/wscript" (%s).' % (self.target, self.path.nice_path(), type(self.install_from)) + + # where to install the python file + if self.install_path: + if self.install_from: + self.add_install_files(install_to=self.install_path, install_from=node, cwd=self.install_from, relative_trick=relative_trick) + else: + self.add_install_files(install_to=self.install_path, install_from=node, relative_trick=relative_trick) + + lst = [] + if self.env.PYC: + lst.append('pyc') + if self.env.PYO: + lst.append('pyo') + + if self.install_path: + if self.install_from: + target_dir = node.path_from(self.install_from) if relative_trick else node.name + pyd = Utils.subst_vars("%s/%s" % (self.install_path, target_dir), self.env) + else: + target_dir = node.path_from(self.path) if relative_trick else node.name + pyd = Utils.subst_vars("%s/%s" % (self.install_path, target_dir), self.env) + else: + pyd = node.abspath() + + for ext in lst: + if self.env.PYTAG and not self.env.NOPYCACHE: + # __pycache__ installation for python 3.2 - PEP 3147 + name = node.name[:-3] + pyobj = node.parent.get_bld().make_node('__pycache__').make_node("%s.%s.%s" % (name, self.env.PYTAG, ext)) + pyobj.parent.mkdir() + else: + pyobj = node.change_ext(".%s" % ext) + + tsk = self.create_task(ext, node, pyobj) + tsk.pyd = pyd + + if self.install_path: + self.add_install_files(install_to=os.path.dirname(pyd), install_from=pyobj, cwd=node.parent.get_bld(), relative_trick=relative_trick) + +class pyc(Task.Task): + """ + Byte-compiling python files + """ + color = 'PINK' + def __str__(self): + node = self.outputs[0] + return node.path_from(node.ctx.launch_node()) + def run(self): + cmd = [Utils.subst_vars('${PYTHON}', self.env), '-c', INST, self.inputs[0].abspath(), self.outputs[0].abspath(), self.pyd] + ret = self.generator.bld.exec_command(cmd) + return ret + +class pyo(Task.Task): + """ + Byte-compiling python files + """ + color = 'PINK' + def __str__(self): + node = self.outputs[0] + return node.path_from(node.ctx.launch_node()) + def run(self): + cmd = [Utils.subst_vars('${PYTHON}', self.env), Utils.subst_vars('${PYFLAGS_OPT}', self.env), '-c', INST, self.inputs[0].abspath(), self.outputs[0].abspath(), self.pyd] + ret = self.generator.bld.exec_command(cmd) + return ret + +@feature('pyext') +@before_method('propagate_uselib_vars', 'apply_link') +@after_method('apply_bundle') +def init_pyext(self): + """ + Change the values of *cshlib_PATTERN* and *cxxshlib_PATTERN* to remove the + *lib* prefix from library names. 
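+ For example, with a hypothetical pyext_PATTERN of '%s.so', a target named
+ 'mylib' is built as 'mylib.so' rather than 'libmylib.so'.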
+ """ + self.uselib = self.to_list(getattr(self, 'uselib', [])) + if not 'PYEXT' in self.uselib: + self.uselib.append('PYEXT') + # override shlib_PATTERN set by the osx module + self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.macbundle_PATTERN = self.env.pyext_PATTERN + self.env.fcshlib_PATTERN = self.env.dshlib_PATTERN = self.env.pyext_PATTERN + + try: + if not self.install_path: + return + except AttributeError: + self.install_path = '${PYTHONARCHDIR}' + +@feature('pyext') +@before_method('apply_link', 'apply_bundle') +def set_bundle(self): + """Mac-specific pyext extension that enables bundles from c_osx.py""" + if Utils.unversioned_sys_platform() == 'darwin': + self.mac_bundle = True + +@before_method('propagate_uselib_vars') +@feature('pyembed') +def init_pyembed(self): + """ + Add the PYEMBED variable. + """ + self.uselib = self.to_list(getattr(self, 'uselib', [])) + if not 'PYEMBED' in self.uselib: + self.uselib.append('PYEMBED') + +@conf +def get_python_variables(self, variables, imports=None): + """ + Spawn a new python process to dump configuration variables + + :param variables: variables to print + :type variables: list of string + :param imports: one import by element + :type imports: list of string + :return: the variable values + :rtype: list of string + """ + if not imports: + try: + imports = self.python_imports + except AttributeError: + imports = DISTUTILS_IMP + + program = list(imports) # copy + program.append('') + for v in variables: + program.append("print(repr(%s))" % v) + os_env = dict(os.environ) + try: + del os_env['MACOSX_DEPLOYMENT_TARGET'] # see comments in the OSX tool + except KeyError: + pass + + try: + out = self.cmd_and_log(self.env.PYTHON + ['-c', '\n'.join(program)], env=os_env) + except Errors.WafError: + self.fatal('Could not run %r' % self.env.PYTHON) + self.to_log(out) + return_values = [] + for s in out.splitlines(): + s = s.strip() + if not s: + continue + if s == 'None': + return_values.append(None) + elif (s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"'): + return_values.append(eval(s)) + elif s[0].isdigit(): + return_values.append(int(s)) + else: break + return return_values + +@conf +def test_pyembed(self, mode, msg='Testing pyembed configuration'): + self.check(header_name='Python.h', define_name='HAVE_PYEMBED', msg=msg, + fragment=FRAG, errmsg='Could not build a python embedded interpreter', + features='%s %sprogram pyembed' % (mode, mode)) + +@conf +def test_pyext(self, mode, msg='Testing pyext configuration'): + self.check(header_name='Python.h', define_name='HAVE_PYEXT', msg=msg, + fragment=FRAG, errmsg='Could not build python extensions', + features='%s %sshlib pyext' % (mode, mode)) + +@conf +def python_cross_compile(self, features='pyembed pyext'): + """ + For cross-compilation purposes, it is possible to bypass the normal detection and set the flags that you want: + PYTHON_VERSION='3.4' PYTAG='cpython34' pyext_PATTERN="%s.so" PYTHON_LDFLAGS='-lpthread -ldl' waf configure + + The following variables are used: + PYTHON_VERSION required + PYTAG required + PYTHON_LDFLAGS required + pyext_PATTERN required + PYTHON_PYEXT_LDFLAGS + PYTHON_PYEMBED_LDFLAGS + """ + features = Utils.to_list(features) + if not ('PYTHON_LDFLAGS' in self.environ or 'PYTHON_PYEXT_LDFLAGS' in self.environ or 'PYTHON_PYEMBED_LDFLAGS' in self.environ): + return False + + for x in 'PYTHON_VERSION PYTAG pyext_PATTERN'.split(): + if not x in self.environ: + self.fatal('Please set %s in the os environment' % x) + else: + self.env[x] = 
self.environ[x] + + xx = self.env.CXX_NAME and 'cxx' or 'c' + if 'pyext' in features: + flags = self.environ.get('PYTHON_PYEXT_LDFLAGS', self.environ.get('PYTHON_LDFLAGS')) + if flags is None: + self.fatal('No flags provided through PYTHON_PYEXT_LDFLAGS as required') + else: + self.parse_flags(flags, 'PYEXT') + self.test_pyext(xx) + if 'pyembed' in features: + flags = self.environ.get('PYTHON_PYEMBED_LDFLAGS', self.environ.get('PYTHON_LDFLAGS')) + if flags is None: + self.fatal('No flags provided through PYTHON_PYEMBED_LDFLAGS as required') + else: + self.parse_flags(flags, 'PYEMBED') + self.test_pyembed(xx) + return True + +@conf +def check_python_headers(conf, features='pyembed pyext'): + """ + Check for headers and libraries necessary to extend or embed python. + It may use the module *distutils* or sysconfig in newer Python versions. + On success the environment variables xxx_PYEXT and xxx_PYEMBED are added: + + * PYEXT: for compiling python extensions + * PYEMBED: for embedding a python interpreter + """ + features = Utils.to_list(features) + assert ('pyembed' in features) or ('pyext' in features), "check_python_headers features must include 'pyembed' and/or 'pyext'" + env = conf.env + if not env.CC_NAME and not env.CXX_NAME: + conf.fatal('load a compiler first (gcc, g++, ..)') + + # bypass all the code below for cross-compilation + if conf.python_cross_compile(features): + return + + if not env.PYTHON_VERSION: + conf.check_python_version() + + pybin = env.PYTHON + if not pybin: + conf.fatal('Could not find the python executable') + + # so we actually do all this for compatibility reasons and for obtaining pyext_PATTERN below + v = 'prefix SO EXT_SUFFIX LDFLAGS LIBDIR LIBPL INCLUDEPY Py_ENABLE_SHARED MACOSX_DEPLOYMENT_TARGET LDSHARED CFLAGS LDVERSION'.split() + try: + lst = conf.get_python_variables(["get_config_var('%s') or ''" % x for x in v]) + except RuntimeError: + conf.fatal("Python development headers not found (-v for details).") + + vals = ['%s = %r' % (x, y) for (x, y) in zip(v, lst)] + conf.to_log("Configuration returned from %r:\n%s\n" % (pybin, '\n'.join(vals))) + + dct = dict(zip(v, lst)) + x = 'MACOSX_DEPLOYMENT_TARGET' + if dct[x]: + env[x] = conf.environ[x] = str(dct[x]) + env.pyext_PATTERN = '%s' + (dct['EXT_SUFFIX'] or dct['SO']) # SO is deprecated in 3.5 and removed in 3.11 + + + # Try to get pythonX.Y-config + num = '.'.join(env.PYTHON_VERSION.split('.')[:2]) + conf.find_program([''.join(pybin) + '-config', 'python%s-config' % num, 'python-config-%s' % num, 'python%sm-config' % num], var='PYTHON_CONFIG', msg="python-config", mandatory=False) + + if env.PYTHON_CONFIG: + # check python-config output only once + if conf.env.HAVE_PYTHON_H: + return + + # python2.6-config requires 3 runs + all_flags = [['--cflags', '--libs', '--ldflags']] + if sys.hexversion < 0x2070000: + all_flags = [[k] for k in all_flags[0]] + + xx = env.CXX_NAME and 'cxx' or 'c' + + if 'pyembed' in features: + for flags in all_flags: + # Python 3.8 has different flags for pyembed, needs --embed + embedflags = flags + ['--embed'] + try: + conf.check_cfg(msg='Asking python-config for pyembed %r flags' % ' '.join(embedflags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=embedflags) + except conf.errors.ConfigurationError: + # However Python < 3.8 doesn't accept --embed, so we need a fallback + conf.check_cfg(msg='Asking python-config for pyembed %r flags' % ' '.join(flags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEMBED', args=flags) + + try: + conf.test_pyembed(xx) + 
except conf.errors.ConfigurationError: + # python bug 7352 + if dct['Py_ENABLE_SHARED'] and dct['LIBDIR']: + env.append_unique('LIBPATH_PYEMBED', [dct['LIBDIR']]) + conf.test_pyembed(xx) + else: + raise + + if 'pyext' in features: + for flags in all_flags: + conf.check_cfg(msg='Asking python-config for pyext %r flags' % ' '.join(flags), path=env.PYTHON_CONFIG, package='', uselib_store='PYEXT', args=flags) + + try: + conf.test_pyext(xx) + except conf.errors.ConfigurationError: + # python bug 7352 + if dct['Py_ENABLE_SHARED'] and dct['LIBDIR']: + env.append_unique('LIBPATH_PYEXT', [dct['LIBDIR']]) + conf.test_pyext(xx) + else: + raise + + conf.define('HAVE_PYTHON_H', 1) + return + + # No python-config, do something else on windows systems + all_flags = dct['LDFLAGS'] + ' ' + dct['CFLAGS'] + conf.parse_flags(all_flags, 'PYEMBED') + + all_flags = dct['LDFLAGS'] + ' ' + dct['LDSHARED'] + ' ' + dct['CFLAGS'] + conf.parse_flags(all_flags, 'PYEXT') + + result = None + if not dct["LDVERSION"]: + dct["LDVERSION"] = env.PYTHON_VERSION + + # further simplification will be complicated + for name in ('python' + dct['LDVERSION'], 'python' + env.PYTHON_VERSION + 'm', 'python' + env.PYTHON_VERSION.replace('.', '')): + + # LIBPATH_PYEMBED is already set; see if it works. + if not result and env.LIBPATH_PYEMBED: + path = env.LIBPATH_PYEMBED + conf.to_log("\n\n# Trying default LIBPATH_PYEMBED: %r\n" % path) + result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBPATH_PYEMBED' % name) + + if not result and dct['LIBDIR']: + path = [dct['LIBDIR']] + conf.to_log("\n\n# try again with -L$python_LIBDIR: %r\n" % path) + result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in LIBDIR' % name) + + if not result and dct['LIBPL']: + path = [dct['LIBPL']] + conf.to_log("\n\n# try again with -L$python_LIBPL (some systems don't install the python library in $prefix/lib)\n") + result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in python_LIBPL' % name) + + if not result: + path = [os.path.join(dct['prefix'], "libs")] + conf.to_log("\n\n# try again with -L$prefix/libs, and pythonXY rather than pythonX.Y (win32)\n") + result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in $prefix/libs' % name) + + if not result: + path = [os.path.normpath(os.path.join(dct['INCLUDEPY'], '..', 'libs'))] + conf.to_log("\n\n# try again with -L$INCLUDEPY/../libs, and pythonXY rather than pythonX.Y (win32)\n") + result = conf.check(lib=name, uselib='PYEMBED', libpath=path, mandatory=False, msg='Checking for library %s in $INCLUDEPY/../libs' % name) + + if result: + break # do not forget to set LIBPATH_PYEMBED + + if result: + env.LIBPATH_PYEMBED = path + env.append_value('LIB_PYEMBED', [name]) + else: + conf.to_log("\n\n### LIB NOT FOUND\n") + + # under certain conditions, python extensions must link to + # python libraries, not just python embedding programs. 
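+ # (win32 always needs the import library at link time; with Py_ENABLE_SHARED
+ # the same shared libpython serves both cases, so the embedding settings are
+ # simply reused below)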
+ if Utils.is_win32 or dct['Py_ENABLE_SHARED']: + env.LIBPATH_PYEXT = env.LIBPATH_PYEMBED + env.LIB_PYEXT = env.LIB_PYEMBED + + conf.to_log("Found an include path for Python extensions: %r\n" % (dct['INCLUDEPY'],)) + env.INCLUDES_PYEXT = [dct['INCLUDEPY']] + env.INCLUDES_PYEMBED = [dct['INCLUDEPY']] + + # Code using the Python API needs to be compiled with -fno-strict-aliasing + if env.CC_NAME == 'gcc': + env.append_unique('CFLAGS_PYEMBED', ['-fno-strict-aliasing']) + env.append_unique('CFLAGS_PYEXT', ['-fno-strict-aliasing']) + if env.CXX_NAME == 'gcc': + env.append_unique('CXXFLAGS_PYEMBED', ['-fno-strict-aliasing']) + env.append_unique('CXXFLAGS_PYEXT', ['-fno-strict-aliasing']) + + if env.CC_NAME == "msvc": + try: + from distutils.msvccompiler import MSVCCompiler + except ImportError: + # From https://github.com/python/cpython/blob/main/Lib/distutils/msvccompiler.py + env.append_value('CFLAGS_PYEXT', [ '/nologo', '/Ox', '/MD', '/W3', '/GX', '/DNDEBUG']) + env.append_value('CXXFLAGS_PYEXT', [ '/nologo', '/Ox', '/MD', '/W3', '/GX', '/DNDEBUG']) + env.append_value('LINKFLAGS_PYEXT', ['/DLL', '/nologo', '/INCREMENTAL:NO']) + else: + dist_compiler = MSVCCompiler() + dist_compiler.initialize() + env.append_value('CFLAGS_PYEXT', dist_compiler.compile_options) + env.append_value('CXXFLAGS_PYEXT', dist_compiler.compile_options) + env.append_value('LINKFLAGS_PYEXT', dist_compiler.ldflags_shared) + + conf.check(header_name='Python.h', define_name='HAVE_PYTHON_H', uselib='PYEMBED', fragment=FRAG, errmsg='Could not build a Python embedded interpreter') + +@conf +def check_python_version(conf, minver=None): + """ + Check if the python interpreter is found matching a given minimum version. + minver should be a tuple, eg. to check for python >= 2.4.2 pass (2,4,2) as minver. + + If successful, PYTHON_VERSION is defined as 'MAJOR.MINOR' (eg. '2.4') + of the actual python version found, and PYTHONDIR and PYTHONARCHDIR + are defined, pointing to the site-packages directories appropriate for + this python version, where modules/packages/extensions should be + installed. 
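+
+ A short sketch (version tuple chosen for illustration)::
+
+ def configure(conf):
+ conf.check_python_version((2,7,0))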
+
+ :param minver: minimum version
+ :type minver: tuple of int
+ """
+ assert minver is None or isinstance(minver, tuple)
+ pybin = conf.env.PYTHON
+ if not pybin:
+ conf.fatal('could not find the python executable')
+
+ # Get python version string
+ cmd = pybin + ['-c', 'import sys\nfor x in sys.version_info: print(str(x))']
+ Logs.debug('python: Running python command %r', cmd)
+ lines = conf.cmd_and_log(cmd).split()
+ assert len(lines) == 5, "found %r lines, expected 5: %r" % (len(lines), lines)
+ pyver_tuple = (int(lines[0]), int(lines[1]), int(lines[2]), lines[3], int(lines[4]))
+
+ # Compare python version with the minimum required
+ result = (minver is None) or (pyver_tuple >= minver)
+
+ if result:
+ # define useful environment variables
+ pyver = '.'.join([str(x) for x in pyver_tuple[:2]])
+ conf.env.PYTHON_VERSION = pyver
+
+ if 'PYTHONDIR' in conf.env:
+ # Check if --pythondir was specified
+ pydir = conf.env.PYTHONDIR
+ elif 'PYTHONDIR' in conf.environ:
+ # Check environment for PYTHONDIR
+ pydir = conf.environ['PYTHONDIR']
+ else:
+ # Finally, try to guess
+ if Utils.is_win32:
+ (pydir,) = conf.get_python_variables(["get_python_lib(standard_lib=0) or ''"])
+ else:
+ (pydir,) = conf.get_python_variables(["get_python_lib(standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX])
+
+ if 'PYTHONARCHDIR' in conf.env:
+ # Check if --pythonarchdir was specified
+ pyarchdir = conf.env.PYTHONARCHDIR
+ elif 'PYTHONARCHDIR' in conf.environ:
+ # Check environment for PYTHONARCHDIR
+ pyarchdir = conf.environ['PYTHONARCHDIR']
+ else:
+ # Finally, try to guess
+ (pyarchdir, ) = conf.get_python_variables(["get_python_lib(plat_specific=1, standard_lib=0, prefix=%r) or ''" % conf.env.PREFIX])
+ if not pyarchdir:
+ pyarchdir = pydir
+
+ if hasattr(conf, 'define'): # conf.define is added by the C tool, so may not exist
+ conf.define('PYTHONDIR', pydir)
+ conf.define('PYTHONARCHDIR', pyarchdir)
+
+ conf.env.PYTHONDIR = pydir
+ conf.env.PYTHONARCHDIR = pyarchdir
+
+ # Feedback
+ pyver_full = '.'.join(map(str, pyver_tuple[:3]))
+ if minver is None:
+ conf.msg('Checking for python version', pyver_full)
+ else:
+ minver_str = '.'.join(map(str, minver))
+ conf.msg('Checking for python version >= %s' % (minver_str,), pyver_full, color=result and 'GREEN' or 'YELLOW')
+
+ if not result:
+ conf.fatal('The python version is too old, expecting %r' % (minver,))
+
+PYTHON_MODULE_TEMPLATE = '''
+import %s as current_module
+version = getattr(current_module, '__version__', None)
+if version is not None:
+ print(str(version))
+else:
+ print('unknown version')
+'''
+
+@conf
+def check_python_module(conf, module_name, condition=''):
+ """
+ Check if the selected python interpreter can import the given python module::
+
+ def configure(conf):
+ conf.check_python_module('pygccxml')
+ conf.check_python_module('re', condition="ver > num(2, 0, 4) and ver <= num(3, 0, 0)")
+
+ :param module_name: module
+ :type module_name: string
+ """
+ msg = "Checking for python module %r" % module_name
+ if condition:
+ msg = '%s (%s)' % (msg, condition)
+ conf.start_msg(msg)
+ try:
+ ret = conf.cmd_and_log(conf.env.PYTHON + ['-c', PYTHON_MODULE_TEMPLATE % module_name])
+ except Errors.WafError:
+ conf.end_msg(False)
+ conf.fatal('Could not find the python module %r' % module_name)
+
+ ret = ret.strip()
+ if condition:
+ conf.end_msg(ret)
+ if ret == 'unknown version':
+ conf.fatal('Could not check the %s version' % module_name)
+
+ def num(*k):
+ if isinstance(k[0], int):
+ return Utils.loose_version('.'.join([str(x) for x in k]))
+ else:
return Utils.loose_version(k[0]) + d = {'num': num, 'ver': Utils.loose_version(ret)} + ev = eval(condition, {}, d) + if not ev: + conf.fatal('The %s version does not satisfy the requirements' % module_name) + else: + if ret == 'unknown version': + conf.end_msg(True) + else: + conf.end_msg(ret) + +def configure(conf): + """ + Detect the python interpreter + """ + v = conf.env + if getattr(Options.options, 'pythondir', None): + v.PYTHONDIR = Options.options.pythondir + if getattr(Options.options, 'pythonarchdir', None): + v.PYTHONARCHDIR = Options.options.pythonarchdir + if getattr(Options.options, 'nopycache', None): + v.NOPYCACHE=Options.options.nopycache + + if not v.PYTHON: + v.PYTHON = [getattr(Options.options, 'python', None) or sys.executable] + v.PYTHON = Utils.to_list(v.PYTHON) + conf.find_program('python', var='PYTHON') + + v.PYFLAGS = '' + v.PYFLAGS_OPT = '-O' + + v.PYC = getattr(Options.options, 'pyc', 1) + v.PYO = getattr(Options.options, 'pyo', 1) + + try: + v.PYTAG = conf.cmd_and_log(conf.env.PYTHON + ['-c', "import sys\ntry:\n print(sys.implementation.cache_tag)\nexcept AttributeError:\n import imp\n print(imp.get_tag())\n"]).strip() + except Errors.WafError: + pass + +def options(opt): + """ + Add python-specific options + """ + pyopt=opt.add_option_group("Python Options") + pyopt.add_option('--nopyc', dest = 'pyc', action='store_false', default=1, + help = 'Do not install bytecode compiled .pyc files (configuration) [Default:install]') + pyopt.add_option('--nopyo', dest='pyo', action='store_false', default=1, + help='Do not install optimised compiled .pyo files (configuration) [Default:install]') + pyopt.add_option('--nopycache',dest='nopycache', action='store_true', + help='Do not use __pycache__ directory to install objects [Default:auto]') + pyopt.add_option('--python', dest="python", + help='python binary to be used [Default: %s]' % sys.executable) + pyopt.add_option('--pythondir', dest='pythondir', + help='Installation path for python modules (py, platform-independent .py and .pyc files)') + pyopt.add_option('--pythonarchdir', dest='pythonarchdir', + help='Installation path for python extension (pyext, platform-dependent .so or .dylib files)') + diff --git a/third_party/waf/waflib/Tools/qt5.py b/third_party/waf/waflib/Tools/qt5.py new file mode 100644 index 0000000..0932e94 --- /dev/null +++ b/third_party/waf/waflib/Tools/qt5.py @@ -0,0 +1,890 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Rafaël Kooi, 2023 (RA-Kooi) + +""" +This tool helps with finding Qt5 and Qt6 tools and libraries, +and also provides syntactic sugar for using Qt5 and Qt6 tools. + +The following snippet illustrates the tool usage:: + + def options(opt): + opt.load('compiler_cxx qt5') + + def configure(conf): + conf.load('compiler_cxx qt5') + + def build(bld): + bld( + features = 'qt5 cxx cxxprogram', + uselib = 'QT5CORE QT5GUI QT5OPENGL QT5SVG', + source = 'main.cpp textures.qrc aboutDialog.ui', + target = 'window', + ) + +Alternatively the following snippet illustrates Qt6 tool usage:: + + def options(opt): + opt.load('compiler_cxx qt5') + + def configure(conf): + conf.want_qt6 = True + conf.load('compiler_cxx qt5') + + def build(bld): + bld( + features = 'qt6 cxx cxxprogram', + uselib = 'QT6CORE QT6GUI QT6OPENGL QT6SVG', + source = 'main.cpp textures.qrc aboutDialog.ui', + target = 'window', + ) + +Here, the UI description and resource files will be processed +to generate code. + +Usage +===== + +Load the "qt5" tool. 
+ +You also need to edit your sources accordingly: + +- the normal way of doing things is to have your C++ files + include the .moc file. + This is regarded as the best practice (and provides much faster + compilations). + It also implies that the include paths have been set properly. + +- to have the include paths added automatically, use the following:: + + from waflib.TaskGen import feature, before_method, after_method + @feature('cxx') + @after_method('process_source') + @before_method('apply_incpaths') + def add_includes_paths(self): + incs = set(self.to_list(getattr(self, 'includes', ''))) + for x in self.compiled_tasks: + incs.add(x.inputs[0].parent.path_from(self.path)) + self.includes = sorted(incs) + +Note: another tool provides Qt processing that does not require +.moc includes, see 'playground/slow_qt/'. + +A few options (--qt{dir,bin,...}) and environment variables +(QT5_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool, +tool path selection, etc; please read the source for more info. +For Qt6 replace the QT5_ prefix with QT6_. + +The detection uses pkg-config on Linux by default. The list of +libraries to be requested from pkg-config is formulated by scanning +the QTLIBS directory (which can be passed via --qtlibs or by +setting the environment variable QT5_LIBDIR or QT6_LIBDIR; otherwise it is +derived by querying qmake for the QT_INSTALL_LIBS directory) for +the shared/static libraries present. +Alternatively the list of libraries to be requested via pkg-config +can be set using the qt5_vars attribute, e.g.: + + conf.qt5_vars = ['Qt5Core', 'Qt5Gui', 'Qt5Widgets', 'Qt5Test']; + +For Qt6 use the qt6_vars attribute. + +This can speed up the configuration phase if the needed libraries are +known beforehand, can improve detection on systems with a +sparse Qt5/Qt6 library installation (e.g. Nix) and can improve +detection of some header-only Qt modules (e.g. Qt5UiPlugin). + +To force static library detection use: +QT5_XCOMPILE=1 QT5_FORCE_STATIC=1 waf configure + +To use Qt6 set the want_qt6 attribute, i.e.: + + conf.want_qt6 = True; +""" + +from __future__ import with_statement + +try: + from xml.sax import make_parser + from xml.sax.handler import ContentHandler +except ImportError: + has_xml = False + ContentHandler = object +else: + has_xml = True + +import os, sys, re +from waflib.Tools import cxx +from waflib import Build, Task, Utils, Options, Errors, Context +from waflib.TaskGen import feature, after_method, extension, before_method +from waflib.Configure import conf +from waflib import Logs + +MOC_H = ['.h', '.hpp', '.hxx', '.hh'] +""" +File extensions associated with .moc files +""" + +EXT_RCC = ['.qrc'] +""" +File extension for the resource (.qrc) files +""" + +EXT_UI = ['.ui'] +""" +File extension for the user interface (.ui) files +""" + +EXT_QT5 = ['.cpp', '.cc', '.cxx', '.C'] +""" +File extensions of C++ files that may require .moc processing +""" + +class qxx(Task.classes['cxx']): + """ + Each C++ file can have zero or several .moc files to create. + They are known only when the files are scanned (preprocessor). + To avoid scanning the C++ files each time (parsing C/C++), the results + are retrieved from the task cache (bld.node_deps/bld.raw_deps). + The moc tasks are also created *dynamically* during the build. + """ + + def __init__(self, *k, **kw): + Task.Task.__init__(self, *k, **kw) + self.moc_done = 0 + + def runnable_status(self): + """ + Compute the task signature to make sure the scanner was executed.
Create the + moc tasks by using :py:meth:`waflib.Tools.qt5.qxx.add_moc_tasks` (if necessary), + then postpone the task execution (there is no need to recompute the task signature). + """ + if self.moc_done: + return Task.Task.runnable_status(self) + else: + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + self.add_moc_tasks() + return Task.Task.runnable_status(self) + + def create_moc_task(self, h_node, m_node): + """ + If several libraries use the same classes, it is possible that moc will run several times (Issue 1318). + It is not possible to change the file names, but we can assume that the moc transformation will be identical, + and the moc tasks can be shared in a global cache. + """ + try: + moc_cache = self.generator.bld.moc_cache + except AttributeError: + moc_cache = self.generator.bld.moc_cache = {} + + try: + return moc_cache[h_node] + except KeyError: + tsk = moc_cache[h_node] = Task.classes['moc'](env=self.env, generator=self.generator) + tsk.set_inputs(h_node) + tsk.set_outputs(m_node) + tsk.env.append_unique('MOC_FLAGS', '-i') + + if self.generator: + self.generator.tasks.append(tsk) + + # direct injection in the build phase (safe because called from the main thread) + gen = self.generator.bld.producer + gen.outstanding.append(tsk) + gen.total += 1 + + return tsk + + def add_moc_tasks(self): + """ + Creates moc tasks by looking in the list of file dependencies ``bld.raw_deps[self.uid()]`` + """ + node = self.inputs[0] + bld = self.generator.bld + + # skip on uninstall due to generated files + if bld.is_install == Build.UNINSTALL: + return + + try: + # compute the signature once to know if there is a moc file to create + self.signature() + except KeyError: + # the moc file may be referenced somewhere else + pass + else: + # remove the signature, it must be recomputed with the moc task + delattr(self, 'cache_sig') + + include_nodes = [node.parent] + self.generator.includes_nodes + + moctasks = [] + mocfiles = set() + for d in bld.raw_deps.get(self.uid(), []): + if not d.endswith('.moc'): + continue + + # process that base.moc only once + if d in mocfiles: + continue + mocfiles.add(d) + + # find the source associated with the moc file + h_node = None + base2 = d[:-4] + + # foo.moc from foo.cpp + prefix = node.name[:node.name.rfind('.')] + if base2 == prefix: + h_node = node + else: + # this deviates from the standard + # if bar.cpp includes foo.moc, then assume it is from foo.h + for x in include_nodes: + for e in MOC_H: + h_node = x.find_node(base2 + e) + if h_node: + break + else: + continue + break + if h_node: + m_node = h_node.change_ext('.moc') + else: + raise Errors.WafError('No source found for %r which is a moc file' % d) + + # create the moc task + task = self.create_moc_task(h_node, m_node) + moctasks.append(task) + + # simple scheduler dependency: run the moc task before others + self.run_after.update(set(moctasks)) + self.moc_done = 1 + +class trans_update(Task.Task): + """Updates .ts files from a list of C++ files""" + run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}' + color = 'BLUE' + +class XMLHandler(ContentHandler): + """ + Parses ``.qrc`` files + """ + def __init__(self): + ContentHandler.__init__(self) + self.buf = [] + self.files = [] + def startElement(self, name, attrs): + if name == 'file': + self.buf = [] + def endElement(self, name): + if name == 'file': + self.files.append(str(''.join(self.buf))) + def characters(self, cars): +
self.buf.append(cars) + +@extension(*EXT_RCC) +def create_rcc_task(self, node): + "Creates rcc and cxx tasks for ``.qrc`` files" + rcnode = node.change_ext('_rc.%d.cpp' % self.idx) + self.create_task('rcc', node, rcnode) + cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) + try: + self.compiled_tasks.append(cpptask) + except AttributeError: + self.compiled_tasks = [cpptask] + return cpptask + +@extension(*EXT_UI) +def create_uic_task(self, node): + "Create uic tasks for user interface ``.ui`` definition files" + + """ + If a UIC file is used in more than one bld, we would have a conflict in parallel execution. + It is not possible to change the file names (like .self.idx. as for objects) as they have + to be referenced by the source file, but we can assume that the transformation will be identical + and the tasks can be shared in a global cache. + """ + try: + uic_cache = self.bld.uic_cache + except AttributeError: + uic_cache = self.bld.uic_cache = {} + + if node not in uic_cache: + uictask = uic_cache[node] = self.create_task('ui5', node) + uictask.outputs = [node.parent.find_or_declare(self.env.ui_PATTERN % node.name[:-3])] + +@extension('.ts') +def add_lang(self, node): + """Adds all the .ts files into ``self.lang``""" + self.lang = self.to_list(getattr(self, 'lang', [])) + [node] + +@feature('qt5', 'qt6') +@before_method('process_source') +def process_mocs(self): + """ + Processes MOC files included in headers:: + + def build(bld): + bld.program(features='qt5', source='main.cpp', target='app', use='QT5CORE', moc='foo.h') + + The build will run moc on foo.h to create moc_foo.n.cpp. The number in the file name + is provided to avoid name clashes when the same headers are used by several targets. + """ + lst = self.to_nodes(getattr(self, 'moc', [])) + self.source = self.to_list(getattr(self, 'source', [])) + for x in lst: + prefix = x.name[:x.name.rfind('.')] # foo.h -> foo + moc_target = 'moc_%s.%d.cpp' % (prefix, self.idx) + moc_node = x.parent.find_or_declare(moc_target) + self.source.append(moc_node) + + self.create_task('moc', x, moc_node) + +@feature('qt5', 'qt6') +@after_method('apply_link') +def apply_qt5(self): + """ + Adds MOC_FLAGS which may be necessary for moc:: + + def build(bld): + bld.program(features='qt5', source='main.cpp', target='app', use='QT5CORE') + + The additional parameters are: + + :param lang: list of translation files (\\*.ts) to process + :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension + :param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**) + :type update: bool + :param langname: if given, transform the \\*.ts files into a .qrc file to include in the binary file + :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension + """ + if getattr(self, 'lang', None): + qmtasks = [] + for x in self.to_list(self.lang): + if isinstance(x, str): + x = self.path.find_resource(x + '.ts') + qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.%d.qm' % self.idx))) + + if getattr(self, 'update', None) and Options.options.trans_qt5: + cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [ + a.inputs[0] for a in self.tasks if a.inputs and a.inputs[0].name.endswith('.ui')] + for x in qmtasks: + self.create_task('trans_update', cxxnodes, x.inputs) + + if getattr(self, 'langname', None): + qmnodes = [x.outputs[0] for x in qmtasks] + rcnode = self.langname + if isinstance(rcnode, str): + rcnode = self.path.find_or_declare(rcnode + ('.%d.qrc' %
self.idx)) + t = self.create_task('qm2rcc', qmnodes, rcnode) + k = create_rcc_task(self, t.outputs[0]) + self.link_task.inputs.append(k.outputs[0]) + + lst = [] + for flag in self.to_list(self.env.CXXFLAGS): + if len(flag) < 2: + continue + f = flag[0:2] + if f in ('-D', '-I', '/D', '/I'): + if (f[0] == '/'): + lst.append('-' + flag[1:]) + else: + lst.append(flag) + self.env.append_value('MOC_FLAGS', lst) + +@extension(*EXT_QT5) +def cxx_hook(self, node): + """ + Re-maps C++ file extensions to the :py:class:`waflib.Tools.qt5.qxx` task. + """ + return self.create_compiled_task('qxx', node) + +class rcc(Task.Task): + """ + Processes ``.qrc`` files + """ + color = 'BLUE' + run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' + ext_out = ['.h'] + + def rcname(self): + return os.path.splitext(self.inputs[0].name)[0] + + def scan(self): + """Parse the *.qrc* files""" + if not has_xml: + Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') + return ([], []) + + parser = make_parser() + curHandler = XMLHandler() + parser.setContentHandler(curHandler) + with open(self.inputs[0].abspath(), 'r') as f: + parser.parse(f) + + nodes = [] + names = [] + root = self.inputs[0].parent + for x in curHandler.files: + nd = root.find_resource(x) + if nd: + nodes.append(nd) + else: + names.append(x) + return (nodes, names) + + def quote_flag(self, x): + """ + Override Task.quote_flag. QT parses the argument files + differently than cl.exe and link.exe + + :param x: flag + :type x: string + :return: quoted flag + :rtype: string + """ + return x + + +class moc(Task.Task): + """ + Creates ``.moc`` files + """ + color = 'BLUE' + run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}' + + def quote_flag(self, x): + """ + Override Task.quote_flag. QT parses the argument files + differently than cl.exe and link.exe + + :param x: flag + :type x: string + :return: quoted flag + :rtype: string + """ + return x + + +class ui5(Task.Task): + """ + Processes ``.ui`` files + """ + color = 'BLUE' + run_str = '${QT_UIC} ${SRC} -o ${TGT}' + ext_out = ['.h'] + +class ts2qm(Task.Task): + """ + Generates ``.qm`` files from ``.ts`` files + """ + color = 'BLUE' + run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' + +class qm2rcc(Task.Task): + """ + Generates ``.qrc`` files from ``.qm`` files + """ + color = 'BLUE' + after = 'ts2qm' + def run(self): + """Create a qrc file including the inputs""" + txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs]) + code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt + self.outputs[0].write(code) + +def configure(self): + """ + Besides the configuration options, the environment variable QT5_ROOT may be used + to give the location of the qt5 libraries (absolute path). 
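+ + For example, a non-standard installation could be configured with (the paths are illustrative):: + + QT5_ROOT=/opt/qt5 QT5_LIBDIR=/opt/qt5/lib waf configure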
+ + The detection uses the program ``pkg-config`` through :py:func:`waflib.Tools.config_c.check_cfg` + """ + if 'COMPILER_CXX' not in self.env: + self.fatal('No CXX compiler defined: did you forget to configure compiler_cxx first?') + + self.want_qt6 = getattr(self, 'want_qt6', False) + + if self.want_qt6: + self.qt_vars = Utils.to_list(getattr(self, 'qt6_vars', [])) + else: + self.qt_vars = Utils.to_list(getattr(self, 'qt5_vars', [])) + + self.find_qt5_binaries() + self.set_qt5_libs_dir() + self.set_qt5_libs_to_check() + self.set_qt5_defines() + self.find_qt5_libraries() + self.add_qt5_rpath() + self.simplify_qt5_libs() + + # warn about this during the configuration too + if not has_xml: + Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') + + feature = 'qt6' if self.want_qt6 else 'qt5' + # Qt6 requires C++17 (https://www.qt.io/blog/qt-6.0-released) + stdflag = '-std=c++17' if self.want_qt6 else '-std=c++11' + + # Qt5 may be compiled with '-reduce-relocations' which requires dependent programs to have -fPIE or -fPIC? + frag = '#include <QMap>\nint main(int argc, char **argv) {QMap<int,int> m;return m.keys().size();}\n' + uses = 'QT6CORE' if self.want_qt6 else 'QT5CORE' + for flag in [[], '-fPIE', '-fPIC', stdflag, [stdflag, '-fPIE'], [stdflag, '-fPIC']]: + msg = 'See if Qt files compile ' + if flag: + msg += 'with %s' % flag + try: + self.check(features=feature + ' cxx', use=uses, uselib_store=feature, cxxflags=flag, fragment=frag, msg=msg) + except self.errors.ConfigurationError: + pass + else: + break + else: + self.fatal('Could not build a simple Qt application') + + # FreeBSD does not add /usr/local/lib and the pkg-config files do not provide it either :-/ + if Utils.unversioned_sys_platform() == 'freebsd': + frag = '#include <QMap>\nint main(int argc, char **argv) {QMap<int,int> m;return m.keys().size();}\n' + try: + self.check(features=feature + ' cxx cxxprogram', use=uses, fragment=frag, msg='Can we link Qt programs on FreeBSD directly?') + except self.errors.ConfigurationError: + self.check(features=feature + ' cxx cxxprogram', use=uses, uselib_store=feature, libpath='/usr/local/lib', fragment=frag, msg='Is /usr/local/lib required?') + +@conf +def find_qt5_binaries(self): + """ + Detects Qt programs such as qmake, moc, uic, lrelease + """ + env = self.env + opt = Options.options + + qtdir = getattr(opt, 'qtdir', '') + qtbin = getattr(opt, 'qtbin', '') + qt_ver = '6' if self.want_qt6 else '5' + + paths = [] + + if qtdir: + qtbin = os.path.join(qtdir, 'bin') + + # the qt directory has been given from QT5_ROOT - deduce the qt binary path + if not qtdir: + qtdir = self.environ.get('QT' + qt_ver + '_ROOT', '') + qtbin = self.environ.get('QT' + qt_ver + '_BIN') or os.path.join(qtdir, 'bin') + + if qtbin: + paths = [qtbin] + + # no qtdir, look in the path and in /usr/local/Trolltech + if not qtdir: + paths = self.environ.get('PATH', '').split(os.pathsep) + paths.extend([ + '/usr/share/qt' + qt_ver + '/bin', + '/usr/local/lib/qt' + qt_ver + '/bin']) + + try: + lst = Utils.listdir('/usr/local/Trolltech/') + except OSError: + pass + else: + if lst: + lst.sort() + lst.reverse() + + # keep the highest version + qtdir = '/usr/local/Trolltech/%s/' % lst[0] + qtbin = os.path.join(qtdir, 'bin') + paths.append(qtbin) + + # at the end, try to find qmake in the paths given + # keep the one with the highest version + cand = None + prev_ver = ['0', '0', '0'] + qmake_vars = ['qmake-qt' + qt_ver, 'qmake' + qt_ver, 'qmake'] + + for qmk in qmake_vars: + try: + qmake = 
self.find_program(qmk, path_list=paths) + except self.errors.ConfigurationError: + pass + else: + try: + version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip() + except self.errors.WafError: + pass + else: + if version: + new_ver = version.split('.') + if new_ver[0] == qt_ver and new_ver > prev_ver: + cand = qmake + prev_ver = new_ver + + # qmake could not be found easily, rely on qtchooser + if not cand: + try: + self.find_program('qtchooser') + except self.errors.ConfigurationError: + pass + else: + cmd = self.env.QTCHOOSER + ['-qt=' + qt_ver, '-run-tool=qmake'] + try: + version = self.cmd_and_log(cmd + ['-query', 'QT_VERSION']) + except self.errors.WafError: + pass + else: + cand = cmd + + if cand: + self.env.QMAKE = cand + else: + self.fatal('Could not find qmake for qt' + qt_ver) + + # Once we have qmake, we want to query qmake for the paths where we want to look for tools instead + paths = [] + + self.env.QT_HOST_BINS = qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_HOST_BINS']).strip() + paths.append(qtbin) + + if self.want_qt6: + self.env.QT_HOST_LIBEXECS = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_HOST_LIBEXECS']).strip() + paths.append(self.env.QT_HOST_LIBEXECS) + + def find_bin(lst, var): + if var in env: + return + for f in lst: + try: + ret = self.find_program(f, path_list=paths) + except self.errors.ConfigurationError: + pass + else: + env[var]=ret + break + + find_bin(['uic-qt' + qt_ver, 'uic'], 'QT_UIC') + if not env.QT_UIC: + self.fatal('cannot find the uic compiler for qt' + qt_ver) + + self.start_msg('Checking for uic version') + uicver = self.cmd_and_log(env.QT_UIC + ['-version'], output=Context.BOTH) + uicver = ''.join(uicver).strip() + uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '') + self.end_msg(uicver) + if uicver.find(' 3.') != -1 or uicver.find(' 4.') != -1 or (self.want_qt6 and uicver.find(' 5.') != -1): + if self.want_qt6: + self.fatal('this uic compiler is for qt3 or qt4 or qt5, add uic for qt6 to your path') + else: + self.fatal('this uic compiler is for qt3 or qt4, add uic for qt5 to your path') + + find_bin(['moc-qt' + qt_ver, 'moc'], 'QT_MOC') + find_bin(['rcc-qt' + qt_ver, 'rcc'], 'QT_RCC') + find_bin(['lrelease-qt' + qt_ver, 'lrelease'], 'QT_LRELEASE') + find_bin(['lupdate-qt' + qt_ver, 'lupdate'], 'QT_LUPDATE') + + env.UIC_ST = '%s -o %s' + env.MOC_ST = '-o' + env.ui_PATTERN = 'ui_%s.h' + env.QT_LRELEASE_FLAGS = ['-silent'] + env.MOCCPPPATH_ST = '-I%s' + env.MOCDEFINES_ST = '-D%s' + +@conf +def set_qt5_libs_dir(self): + env = self.env + qt_ver = '6' if self.want_qt6 else '5' + + qtlibs = getattr(Options.options, 'qtlibs', None) or self.environ.get('QT' + qt_ver + '_LIBDIR') + + if not qtlibs: + try: + qtlibs = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip() + except Errors.WafError: + qtdir = self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip() + qtlibs = os.path.join(qtdir, 'lib') + + self.msg('Found the Qt' + qt_ver + ' library path', qtlibs) + + env.QTLIBS = qtlibs + +@conf +def find_single_qt5_lib(self, name, uselib, qtlibs, qtincludes, force_static): + env = self.env + qt_ver = '6' if self.want_qt6 else '5' + + if force_static: + exts = ('.a', '.lib') + prefix = 'STLIB' + else: + exts = ('.so', '.lib') + prefix = 'LIB' + + def lib_names(): + for x in exts: + for k in ('', qt_ver) if Utils.is_win32 else ['']: + for p in ('lib', ''): + yield (p, name, k, x) + + for tup in lib_names(): + k = ''.join(tup) + path = 
os.path.join(qtlibs, k) + if os.path.exists(path): + if env.DEST_OS == 'win32': + libval = ''.join(tup[:-1]) + else: + libval = name + env.append_unique(prefix + '_' + uselib, libval) + env.append_unique('%sPATH_%s' % (prefix, uselib), qtlibs) + env.append_unique('INCLUDES_' + uselib, qtincludes) + env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, name.replace('Qt' + qt_ver, 'Qt'))) + return k + return False + +@conf +def find_qt5_libraries(self): + env = self.env + qt_ver = '6' if self.want_qt6 else '5' + + qtincludes = self.environ.get('QT' + qt_ver + '_INCLUDES') or self.cmd_and_log(env.QMAKE + ['-query', 'QT_INSTALL_HEADERS']).strip() + force_static = self.environ.get('QT' + qt_ver + '_FORCE_STATIC') + + try: + if self.environ.get('QT' + qt_ver + '_XCOMPILE'): + self.fatal('QT' + qt_ver + '_XCOMPILE disables pkg-config detection') + self.check_cfg(atleast_pkgconfig_version='0.1') + except self.errors.ConfigurationError: + for i in self.qt_vars: + uselib = i.upper() + if Utils.unversioned_sys_platform() == 'darwin': + # Since at least qt 4.7.3 each library is located in a separate directory + fwk = i.replace('Qt' + qt_ver, 'Qt') + frameworkName = fwk + '.framework' + + qtDynamicLib = os.path.join(env.QTLIBS, frameworkName, fwk) + if os.path.exists(qtDynamicLib): + env.append_unique('FRAMEWORK_' + uselib, fwk) + env.append_unique('FRAMEWORKPATH_' + uselib, env.QTLIBS) + self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') + else: + self.msg('Checking for %s' % i, False, 'YELLOW') + env.append_unique('INCLUDES_' + uselib, os.path.join(env.QTLIBS, frameworkName, 'Headers')) + else: + ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, force_static) + if not force_static and not ret: + ret = self.find_single_qt5_lib(i, uselib, env.QTLIBS, qtincludes, True) + self.msg('Checking for %s' % i, ret, 'GREEN' if ret else 'YELLOW') + else: + path = '%s:%s:%s/pkgconfig:/usr/lib/qt%s/lib/pkgconfig:/opt/qt%s/lib/pkgconfig:/usr/lib/qt%s/lib:/opt/qt%s/lib' % ( + self.environ.get('PKG_CONFIG_PATH', ''), env.QTLIBS, env.QTLIBS, qt_ver, qt_ver, qt_ver, qt_ver) + for i in self.qt_vars: + self.check_cfg(package=i, args='--cflags --libs', mandatory=False, force_static=force_static, pkg_config_path=path) + +@conf +def simplify_qt5_libs(self): + """ + Since library paths make really long command-lines, + and since everything depends on qtcore, remove the qtcore ones from qtgui, etc + """ + env = self.env + def process_lib(vars_, coreval): + for d in vars_: + var = d.upper() + if var == 'QTCORE': + continue + + value = env['LIBPATH_'+var] + if value: + core = env[coreval] + accu = [] + for lib in value: + if lib in core: + continue + accu.append(lib) + env['LIBPATH_'+var] = accu + process_lib(self.qt_vars, 'LIBPATH_QTCORE') + +@conf +def add_qt5_rpath(self): + """ + Defines rpath entries for Qt libraries + """ + env = self.env + if getattr(Options.options, 'want_rpath', False): + def process_rpath(vars_, coreval): + for d in vars_: + var = d.upper() + value = env['LIBPATH_' + var] + if value: + core = env[coreval] + accu = [] + for lib in value: + if var != 'QTCORE': + if lib in core: + continue + accu.append('-Wl,--rpath='+lib) + env['RPATH_' + var] = accu + process_rpath(self.qt_vars, 'LIBPATH_QTCORE') + +@conf +def set_qt5_libs_to_check(self): + qt_ver = '6' if self.want_qt6 else '5' + + if not self.qt_vars: + dirlst = Utils.listdir(self.env.QTLIBS) + + pat = self.env.cxxshlib_PATTERN + if Utils.is_win32: + pat = pat.replace('.dll', '.lib') + if self.environ.get('QT' + qt_ver +
'_FORCE_STATIC'): + pat = self.env.cxxstlib_PATTERN + if Utils.unversioned_sys_platform() == 'darwin': + pat = r"%s\.framework" + + # We only want to match Qt5 or Qt in the case of Qt5; in the case + # of Qt6 we want to match Qt6 or Qt. This speeds up configuration + # and reduces the chattiness of the configuration. It should also + # prevent possible misconfiguration. + if self.want_qt6: + re_qt = re.compile(pat % 'Qt6?(?!\\d)(?P<name>\\w+)' + '$') + else: + re_qt = re.compile(pat % 'Qt5?(?!\\d)(?P<name>\\w+)' + '$') + + for x in sorted(dirlst): + m = re_qt.match(x) + if m: + self.qt_vars.append("Qt%s%s" % (qt_ver, m.group('name'))) + if not self.qt_vars: + self.fatal('cannot find any Qt%s library (%r)' % (qt_ver, self.env.QTLIBS)) + + qtextralibs = getattr(Options.options, 'qtextralibs', None) + if qtextralibs: + self.qt_vars.extend(qtextralibs.split(',')) + +@conf +def set_qt5_defines(self): + qt_ver = '6' if self.want_qt6 else '5' + + if sys.platform != 'win32': + return + + for x in self.qt_vars: + y=x.replace('Qt' + qt_ver, 'Qt')[2:].upper() + self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y) + +def options(opt): + """ + Command-line options + """ + opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries') + for i in 'qtdir qtbin qtlibs'.split(): + opt.add_option('--'+i, type='string', default='', dest=i) + + opt.add_option('--translate', action='store_true', help='collect translation strings', dest='trans_qt5', default=False) + opt.add_option('--qtextralibs', type='string', default='', dest='qtextralibs', help='additional qt libraries on the system to add to default ones, comma separated') + diff --git a/third_party/waf/waflib/Tools/ruby.py b/third_party/waf/waflib/Tools/ruby.py new file mode 100644 index 0000000..8d92a79 --- /dev/null +++ b/third_party/waf/waflib/Tools/ruby.py @@ -0,0 +1,186 @@ +#!/usr/bin/env python +# encoding: utf-8 +# daniel.svensson at purplescout.se 2008 +# Thomas Nagy 2016-2018 (ita) + +""" +Support for Ruby extensions. A C/C++ compiler is required:: + + def options(opt): + opt.load('compiler_c ruby') + def configure(conf): + conf.load('compiler_c ruby') + conf.check_ruby_version((1,8,0)) + conf.check_ruby_ext_devel() + conf.check_ruby_module('libxml') + def build(bld): + bld( + features = 'c cshlib rubyext', + source = 'rb_mytest.c', + target = 'mytest_ext', + install_path = '${ARCHDIR_RUBY}') + bld.install_files('${LIBDIR_RUBY}', 'Mytest.rb') +""" + +import os +from waflib import Errors, Options, Task, Utils +from waflib.TaskGen import before_method, feature, extension +from waflib.Configure import conf + +@feature('rubyext') +@before_method('apply_incpaths', 'process_source', 'apply_bundle', 'apply_link') +def init_rubyext(self): + """ + Add required variables for ruby extensions + """ + self.install_path = '${ARCHDIR_RUBY}' + self.uselib = self.to_list(getattr(self, 'uselib', '')) + if not 'RUBY' in self.uselib: + self.uselib.append('RUBY') + if not 'RUBYEXT' in self.uselib: + self.uselib.append('RUBYEXT') + +@feature('rubyext') +@before_method('apply_link', 'propagate_uselib_vars') +def apply_ruby_so_name(self): + """ + Strip the *lib* prefix from ruby extensions + """ + self.env.cshlib_PATTERN = self.env.cxxshlib_PATTERN = self.env.rubyext_PATTERN + +@conf +def check_ruby_version(self, minver=()): + """ + Checks if ruby is installed. + If installed, the variable RUBY will be set in the environment.
+ + The ruby binary can be overridden by the ``--with-ruby-binary`` command-line option. + """ + + ruby = self.find_program('ruby', var='RUBY', value=Options.options.rubybinary) + + try: + version = self.cmd_and_log(ruby + ['-e', 'puts defined?(VERSION) ? VERSION : RUBY_VERSION']).strip() + except Errors.WafError: + self.fatal('could not determine ruby version') + self.env.RUBY_VERSION = version + + try: + ver = tuple(map(int, version.split('.'))) + except ValueError: + self.fatal('unsupported ruby version %r' % version) + + cver = '' + if minver: + cver = '>= ' + '.'.join(str(x) for x in minver) + if ver < minver: + self.fatal('ruby is too old %r' % ver) + + self.msg('Checking for ruby version %s' % cver, version) + +@conf +def check_ruby_ext_devel(self): + """ + Check if a ruby extension can be created + """ + if not self.env.RUBY: + self.fatal('ruby detection is required first') + + if not self.env.CC_NAME and not self.env.CXX_NAME: + self.fatal('load a c/c++ compiler first') + + version = tuple(map(int, self.env.RUBY_VERSION.split("."))) + + def read_out(cmd): + return Utils.to_list(self.cmd_and_log(self.env.RUBY + ['-rrbconfig', '-e', cmd])) + + def read_config(key): + return read_out('puts RbConfig::CONFIG[%r]' % key) + + cpppath = archdir = read_config('archdir') + + if version >= (1, 9, 0): + ruby_hdrdir = read_config('rubyhdrdir') + cpppath += ruby_hdrdir + if version >= (2, 0, 0): + cpppath += read_config('rubyarchhdrdir') + cpppath += [os.path.join(ruby_hdrdir[0], read_config('arch')[0])] + + self.check(header_name='ruby.h', includes=cpppath, errmsg='could not find ruby header file', link_header_test=False) + + self.env.LIBPATH_RUBYEXT = read_config('libdir') + self.env.LIBPATH_RUBYEXT += archdir + self.env.INCLUDES_RUBYEXT = cpppath + self.env.CFLAGS_RUBYEXT = read_config('CCDLFLAGS') + self.env.rubyext_PATTERN = '%s.' + read_config('DLEXT')[0] + + # ok this is really stupid, but the command and flags are combined. + # so we try to find the first argument...
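+ # (illustrative values only: LDSHARED often looks like 'gcc -shared' or + # 'cc -dynamiclib', i.e. a program name followed by its flags)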
+ flags = read_config('LDSHARED') + while flags and flags[0][0] != '-': + flags = flags[1:] + + # we also want to strip out the deprecated ppc flags + if len(flags) > 1 and flags[1] == "ppc": + flags = flags[2:] + + self.env.LINKFLAGS_RUBYEXT = flags + self.env.LINKFLAGS_RUBYEXT += read_config('LIBS') + self.env.LINKFLAGS_RUBYEXT += read_config('LIBRUBYARG_SHARED') + + if Options.options.rubyarchdir: + self.env.ARCHDIR_RUBY = Options.options.rubyarchdir + else: + self.env.ARCHDIR_RUBY = read_config('sitearchdir')[0] + + if Options.options.rubylibdir: + self.env.LIBDIR_RUBY = Options.options.rubylibdir + else: + self.env.LIBDIR_RUBY = read_config('sitelibdir')[0] + +@conf +def check_ruby_module(self, module_name): + """ + Check if the selected ruby interpreter can require the given ruby module:: + + def configure(conf): + conf.check_ruby_module('libxml') + + :param module_name: module + :type module_name: string + """ + self.start_msg('Ruby module %s' % module_name) + try: + self.cmd_and_log(self.env.RUBY + ['-e', 'require \'%s\';puts 1' % module_name]) + except Errors.WafError: + self.end_msg(False) + self.fatal('Could not find the ruby module %r' % module_name) + self.end_msg(True) + +@extension('.rb') +def process(self, node): + return self.create_task('run_ruby', node) + +class run_ruby(Task.Task): + """ + Task to run ruby files detected by file extension .rb:: + + def options(opt): + opt.load('ruby') + + def configure(ctx): + ctx.check_ruby_version() + + def build(bld): + bld.env.RBFLAGS = '-e puts "hello world"' + bld(source='a_ruby_file.rb') + """ + run_str = '${RUBY} ${RBFLAGS} -I ${SRC[0].parent.abspath()} ${SRC}' + +def options(opt): + """ + Add the ``--with-ruby-archdir``, ``--with-ruby-libdir`` and ``--with-ruby-binary`` options + """ + opt.add_option('--with-ruby-archdir', type='string', dest='rubyarchdir', help='Specify directory where to install arch specific files') + opt.add_option('--with-ruby-libdir', type='string', dest='rubylibdir', help='Specify alternate ruby library path') + opt.add_option('--with-ruby-binary', type='string', dest='rubybinary', help='Specify alternate ruby binary') + diff --git a/third_party/waf/waflib/Tools/suncc.py b/third_party/waf/waflib/Tools/suncc.py new file mode 100644 index 0000000..33d34fc --- /dev/null +++ b/third_party/waf/waflib/Tools/suncc.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Ralf Habacker, 2006 (rh) + +from waflib import Errors +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +@conf +def find_scc(conf): + """ + Detects the Sun C compiler + """ + v = conf.env + cc = conf.find_program('cc', var='CC') + try: + conf.cmd_and_log(cc + ['-flags']) + except Errors.WafError: + conf.fatal('%r is not a Sun compiler' % cc) + v.CC_NAME = 'sun' + conf.get_suncc_version(cc) + +@conf +def scc_common_flags(conf): + """ + Flags required for executing the sun C compiler + """ + v = conf.env + + v.CC_SRC_F = [] + v.CC_TGT_F = ['-c', '-o', ''] + + if not v.LINK_CC: + v.LINK_CC = v.CC + + v.CCLNK_SRC_F = '' + v.CCLNK_TGT_F = ['-o', ''] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + + v.SONAME_ST = '-Wl,-h,%s' + v.SHLIB_MARKER = '-Bdynamic' + v.STLIB_MARKER = '-Bstatic' + + v.cprogram_PATTERN = '%s' + + v.CFLAGS_cshlib = ['-xcode=pic32', '-DPIC'] + v.LINKFLAGS_cshlib = ['-G'] + v.cshlib_PATTERN = 'lib%s.so' + + 
v.LINKFLAGS_cstlib = ['-Bstatic'] + v.cstlib_PATTERN = 'lib%s.a' + +def configure(conf): + conf.find_scc() + conf.find_ar() + conf.scc_common_flags() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/Tools/suncxx.py b/third_party/waf/waflib/Tools/suncxx.py new file mode 100644 index 0000000..3b384f6 --- /dev/null +++ b/third_party/waf/waflib/Tools/suncxx.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Ralf Habacker, 2006 (rh) + +from waflib import Errors +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +@conf +def find_sxx(conf): + """ + Detects the Sun C++ compiler + """ + v = conf.env + cc = conf.find_program(['CC', 'c++'], var='CXX') + try: + conf.cmd_and_log(cc + ['-flags']) + except Errors.WafError: + conf.fatal('%r is not a Sun compiler' % cc) + v.CXX_NAME = 'sun' + conf.get_suncc_version(cc) + +@conf +def sxx_common_flags(conf): + """ + Flags required for executing the Sun C++ compiler + """ + v = conf.env + + v.CXX_SRC_F = [] + v.CXX_TGT_F = ['-c', '-o', ''] + + if not v.LINK_CXX: + v.LINK_CXX = v.CXX + + v.CXXLNK_SRC_F = [] + v.CXXLNK_TGT_F = ['-o', ''] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + + v.SONAME_ST = '-Wl,-h,%s' + v.SHLIB_MARKER = '-Bdynamic' + v.STLIB_MARKER = '-Bstatic' + + v.cxxprogram_PATTERN = '%s' + + v.CXXFLAGS_cxxshlib = ['-xcode=pic32', '-DPIC'] + v.LINKFLAGS_cxxshlib = ['-G'] + v.cxxshlib_PATTERN = 'lib%s.so' + + v.LINKFLAGS_cxxstlib = ['-Bstatic'] + v.cxxstlib_PATTERN = 'lib%s.a' + +def configure(conf): + conf.find_sxx() + conf.find_ar() + conf.sxx_common_flags() + conf.cxx_load_tools() + conf.cxx_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/Tools/tex.py b/third_party/waf/waflib/Tools/tex.py new file mode 100644 index 0000000..b4792c3 --- /dev/null +++ b/third_party/waf/waflib/Tools/tex.py @@ -0,0 +1,544 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) + +""" +TeX/LaTeX/PDFLaTeX/XeLaTeX support + +Example:: + + def configure(conf): + conf.load('tex') + if not conf.env.LATEX: + conf.fatal('The program LaTeX is required') + + def build(bld): + bld( + features = 'tex', + type = 'latex', # pdflatex or xelatex + source = 'document.ltx', # mandatory, the source + outs = 'ps', # 'pdf' or 'ps pdf' + deps = 'crossreferencing.lst', # to give dependencies directly + prompt = 1, # 0 for the batch mode + ) + +Notes: + +- To configure with a special program, use:: + + $ PDFLATEX=luatex waf configure + +- This tool does not use the target attribute of the task generator + (``bld(target=...)``); the target file name is built from the source + base name and the output type(s) +""" + +import os, re +from waflib import Utils, Task, Errors, Logs, Node +from waflib.TaskGen import feature, before_method + +re_bibunit = re.compile(r'\\(?P<type>putbib)\[(?P<file>[^\[\]]*)\]',re.M) +def bibunitscan(self): + """ + Parses TeX inputs and tries to find the *bibunit* file dependencies + + :return: list of bibunit files + :rtype: list of :py:class:`waflib.Node.Node` + """ + node = self.inputs[0] + + nodes = [] + if not node: + return nodes + + code = node.read() + for match in re_bibunit.finditer(code): + path = match.group('file') + if path: + found = None + for k in ('', '.bib'): + # add another loop for the tex include paths?
+ Logs.debug('tex: trying %s%s', path, k) + fi = node.parent.find_resource(path + k) + if fi: + found = True + nodes.append(fi) + # no break + if not found: + Logs.debug('tex: could not find %s', path) + + Logs.debug('tex: found the following bibunit files: %s', nodes) + return nodes + +exts_deps_tex = ['', '.ltx', '.tex', '.bib', '.pdf', '.png', '.eps', '.ps', '.sty'] +"""List of typical file extensions included in latex files""" + +exts_tex = ['.ltx', '.tex'] +"""List of typical file extensions that contain latex""" + +re_tex = re.compile(r'\\(?P<type>usepackage|RequirePackage|include|bibliography([^\[\]{}]*)|putbib|includegraphics|input|import|bringin|lstinputlisting)(\[[^\[\]]*\])?{(?P<file>[^{}]*)}',re.M) +"""Regexp for expressions that may include latex files""" + +g_bibtex_re = re.compile('bibdata', re.M) +"""Regexp for bibtex files""" + +g_glossaries_re = re.compile('\\@newglossary', re.M) +"""Regexp for expressions that create glossaries""" + +class tex(Task.Task): + """ + Compiles a tex/latex file. + + .. inheritance-diagram:: waflib.Tools.tex.latex waflib.Tools.tex.xelatex waflib.Tools.tex.pdflatex + :top-classes: waflib.Tools.tex.tex + """ + + bibtex_fun, _ = Task.compile_fun('${BIBTEX} ${BIBTEXFLAGS} ${SRCFILE}', shell=False) + bibtex_fun.__doc__ = """ + Execute the program **bibtex** + """ + + makeindex_fun, _ = Task.compile_fun('${MAKEINDEX} ${MAKEINDEXFLAGS} ${SRCFILE}', shell=False) + makeindex_fun.__doc__ = """ + Execute the program **makeindex** + """ + + makeglossaries_fun, _ = Task.compile_fun('${MAKEGLOSSARIES} ${SRCFILE}', shell=False) + makeglossaries_fun.__doc__ = """ + Execute the program **makeglossaries** + """ + + def exec_command(self, cmd, **kw): + """ + Executes TeX commands without buffering (latex may prompt for inputs) + + :return: the return code + :rtype: int + """ + if self.env.PROMPT_LATEX: + # capture the outputs in configuration tests + kw['stdout'] = kw['stderr'] = None + return super(tex, self).exec_command(cmd, **kw) + + def scan_aux(self, node): + """ + Recursive regex-based scanner that finds included auxiliary files. + """ + nodes = [node] + re_aux = re.compile(r'\\@input{(?P<file>[^{}]*)}', re.M) + + def parse_node(node): + code = node.read() + for match in re_aux.finditer(code): + path = match.group('file') + found = node.parent.find_or_declare(path) + if found and found not in nodes: + Logs.debug('tex: found aux node %r', found) + nodes.append(found) + parse_node(found) + parse_node(node) + return nodes + + def scan(self): + """ + Recursive regex-based scanner that finds latex dependencies. 
It uses :py:attr:`waflib.Tools.tex.re_tex` + + Depending on your needs you might want: + + * to change re_tex:: + + from waflib.Tools import tex + tex.re_tex = myregex + + * or to change the method scan from the latex tasks:: + + from waflib.Task import classes + classes['latex'].scan = myscanfunction + """ + node = self.inputs[0] + + nodes = [] + names = [] + seen = [] + if not node: + return (nodes, names) + + def parse_node(node): + if node in seen: + return + seen.append(node) + code = node.read() + for match in re_tex.finditer(code): + + multibib = match.group('type') + if multibib and multibib.startswith('bibliography'): + multibib = multibib[len('bibliography'):] + if multibib.startswith('style'): + continue + else: + multibib = None + + for path in match.group('file').split(','): + if path: + add_name = True + found = None + for k in exts_deps_tex: + + # issue 1067, scan in all texinputs folders + for up in self.texinputs_nodes: + Logs.debug('tex: trying %s%s', path, k) + found = up.find_resource(path + k) + if found: + break + + + for tsk in self.generator.tasks: + if not found or found in tsk.outputs: + break + else: + nodes.append(found) + add_name = False + for ext in exts_tex: + if found.name.endswith(ext): + parse_node(found) + break + + # multibib stuff + if found and multibib and found.name.endswith('.bib'): + try: + self.multibibs.append(found) + except AttributeError: + self.multibibs = [found] + + # no break, people are crazy + if add_name: + names.append(path) + parse_node(node) + + for x in nodes: + x.parent.get_bld().mkdir() + + Logs.debug("tex: found the following : %s and names %s", nodes, names) + return (nodes, names) + + def check_status(self, msg, retcode): + """ + Checks an exit status and raises an error with a particular message + + :param msg: message to display if the code is non-zero + :type msg: string + :param retcode: condition + :type retcode: boolean + """ + if retcode != 0: + raise Errors.WafError('%r command exit status %r' % (msg, retcode)) + + def info(self, *k, **kw): + try: + info = self.generator.bld.conf.logger.info + except AttributeError: + info = Logs.info + info(*k, **kw) + + def bibfile(self): + """ + Parses *.aux* files to find bibfiles to process. + If present, executes :py:meth:`waflib.Tools.tex.tex.bibtex_fun` + """ + for aux_node in self.aux_nodes: + try: + ct = aux_node.read() + except EnvironmentError: + Logs.error('Error reading %s', aux_node.abspath()) + continue + + if g_bibtex_re.findall(ct): + self.info('calling bibtex') + + self.env.env = {} + self.env.env.update(os.environ) + self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) + self.env.SRCFILE = aux_node.name[:-4] + self.check_status('error when calling bibtex', self.bibtex_fun()) + + for node in getattr(self, 'multibibs', []): + self.env.env = {} + self.env.env.update(os.environ) + self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) + self.env.SRCFILE = node.name[:-4] + self.check_status('error when calling bibtex', self.bibtex_fun()) + + def bibunits(self): + """ + Parses *.aux* files to find bibunit files. If there are bibunit files, + runs :py:meth:`waflib.Tools.tex.tex.bibtex_fun`.
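+ The scanner matches ``\putbib[name]`` directives and resolves ``name`` + against the directory of the main source, trying ``name`` and then ``name.bib``.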
+ """ + try: + bibunits = bibunitscan(self) + except OSError: + Logs.error('error bibunitscan') + else: + if bibunits: + fn = ['bu' + str(i) for i in range(1, len(bibunits) + 1)] + if fn: + self.info('calling bibtex on bibunits') + + for f in fn: + self.env.env = {'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()} + self.env.SRCFILE = f + self.check_status('error when calling bibtex', self.bibtex_fun()) + + def makeindex(self): + """ + Searches the filesystem for *.idx* files to process. If present, + runs :py:meth:`waflib.Tools.tex.tex.makeindex_fun` + """ + self.idx_node = self.inputs[0].change_ext('.idx') + try: + idx_path = self.idx_node.abspath() + os.stat(idx_path) + except OSError: + self.info('index file %s absent, not calling makeindex', idx_path) + else: + self.info('calling makeindex') + + self.env.SRCFILE = self.idx_node.name + self.env.env = {} + self.check_status('error when calling makeindex %s' % idx_path, self.makeindex_fun()) + + def bibtopic(self): + """ + Lists additional .aux files from the bibtopic package + """ + p = self.inputs[0].parent.get_bld() + if os.path.exists(os.path.join(p.abspath(), 'btaux.aux')): + self.aux_nodes += p.ant_glob('*[0-9].aux') + + def makeglossaries(self): + """ + Lists additional glossaries from .aux files. If present, runs the makeglossaries program. + """ + src_file = self.inputs[0].abspath() + base_file = os.path.basename(src_file) + base, _ = os.path.splitext(base_file) + for aux_node in self.aux_nodes: + try: + ct = aux_node.read() + except EnvironmentError: + Logs.error('Error reading %s: %r', aux_node.abspath()) + continue + + if g_glossaries_re.findall(ct): + if not self.env.MAKEGLOSSARIES: + raise Errors.WafError("The program 'makeglossaries' is missing!") + Logs.warn('calling makeglossaries') + self.env.SRCFILE = base + self.check_status('error when calling makeglossaries %s' % base, self.makeglossaries_fun()) + return + + def texinputs(self): + """ + Returns the list of texinput nodes as a string suitable for the TEXINPUTS environment variables + + :rtype: string + """ + return os.pathsep.join([k.abspath() for k in self.texinputs_nodes]) + os.pathsep + + def run(self): + """ + Runs the whole TeX build process + + Multiple passes are required depending on the usage of cross-references, + bibliographies, glossaries, indexes and additional contents + The appropriate TeX compiler is called until the *.aux* files stop changing. 
+ """ + env = self.env + + if not env.PROMPT_LATEX: + env.append_value('LATEXFLAGS', '-interaction=batchmode') + env.append_value('PDFLATEXFLAGS', '-interaction=batchmode') + env.append_value('XELATEXFLAGS', '-interaction=batchmode') + + # important, set the cwd for everybody + self.cwd = self.inputs[0].parent.get_bld() + + self.info('first pass on %s', self.__class__.__name__) + + # Hash .aux files before even calling the LaTeX compiler + cur_hash = self.hash_aux_nodes() + + self.call_latex() + + # Find the .aux files again since bibtex processing can require it + self.hash_aux_nodes() + + self.bibtopic() + self.bibfile() + self.bibunits() + self.makeindex() + self.makeglossaries() + + for i in range(10): + # There is no need to call latex again if the .aux hash value has not changed + prev_hash = cur_hash + cur_hash = self.hash_aux_nodes() + if not cur_hash: + Logs.error('No aux.h to process') + if cur_hash and cur_hash == prev_hash: + break + + # run the command + self.info('calling %s', self.__class__.__name__) + self.call_latex() + + def hash_aux_nodes(self): + """ + Returns a hash of the .aux file contents + + :rtype: string or bytes + """ + try: + self.aux_nodes + except AttributeError: + try: + self.aux_nodes = self.scan_aux(self.inputs[0].change_ext('.aux')) + except IOError: + return None + return Utils.h_list([Utils.h_file(x.abspath()) for x in self.aux_nodes]) + + def call_latex(self): + """ + Runs the TeX compiler once + """ + self.env.env = {} + self.env.env.update(os.environ) + self.env.env.update({'TEXINPUTS': self.texinputs()}) + self.env.SRCFILE = self.inputs[0].abspath() + self.check_status('error when calling latex', self.texfun()) + +class latex(tex): + "Compiles LaTeX files" + texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) + +class pdflatex(tex): + "Compiles PdfLaTeX files" + texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) + +class xelatex(tex): + "XeLaTeX files" + texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False) + +class dvips(Task.Task): + "Converts dvi files to postscript" + run_str = '${DVIPS} ${DVIPSFLAGS} ${SRC} -o ${TGT}' + color = 'BLUE' + after = ['latex', 'pdflatex', 'xelatex'] + +class dvipdf(Task.Task): + "Converts dvi files to pdf" + run_str = '${DVIPDF} ${DVIPDFFLAGS} ${SRC} ${TGT}' + color = 'BLUE' + after = ['latex', 'pdflatex', 'xelatex'] + +class pdf2ps(Task.Task): + "Converts pdf files to postscript" + run_str = '${PDF2PS} ${PDF2PSFLAGS} ${SRC} ${TGT}' + color = 'BLUE' + after = ['latex', 'pdflatex', 'xelatex'] + +@feature('tex') +@before_method('process_source') +def apply_tex(self): + """ + Creates :py:class:`waflib.Tools.tex.tex` objects, and + dvips/dvipdf/pdf2ps tasks if necessary (outs='ps', etc). 
+ """ + if not getattr(self, 'type', None) in ('latex', 'pdflatex', 'xelatex'): + self.type = 'pdflatex' + + outs = Utils.to_list(getattr(self, 'outs', [])) + + # prompt for incomplete files (else the batchmode is used) + try: + self.generator.bld.conf + except AttributeError: + default_prompt = False + else: + default_prompt = True + self.env.PROMPT_LATEX = getattr(self, 'prompt', default_prompt) + + deps_lst = [] + + if getattr(self, 'deps', None): + deps = self.to_list(self.deps) + for dep in deps: + if isinstance(dep, str): + n = self.path.find_resource(dep) + if not n: + self.bld.fatal('Could not find %r for %r' % (dep, self)) + if not n in deps_lst: + deps_lst.append(n) + elif isinstance(dep, Node.Node): + deps_lst.append(dep) + + for node in self.to_nodes(self.source): + if self.type == 'latex': + task = self.create_task('latex', node, node.change_ext('.dvi')) + elif self.type == 'pdflatex': + task = self.create_task('pdflatex', node, node.change_ext('.pdf')) + elif self.type == 'xelatex': + task = self.create_task('xelatex', node, node.change_ext('.pdf')) + + task.env = self.env + + # add the manual dependencies + if deps_lst: + for n in deps_lst: + if not n in task.dep_nodes: + task.dep_nodes.append(n) + + # texinputs is a nasty beast + if hasattr(self, 'texinputs_nodes'): + task.texinputs_nodes = self.texinputs_nodes + else: + task.texinputs_nodes = [node.parent, node.parent.get_bld(), self.path, self.path.get_bld()] + lst = os.environ.get('TEXINPUTS', '') + if self.env.TEXINPUTS: + lst += os.pathsep + self.env.TEXINPUTS + if lst: + lst = lst.split(os.pathsep) + for x in lst: + if x: + if os.path.isabs(x): + p = self.bld.root.find_node(x) + if p: + task.texinputs_nodes.append(p) + else: + Logs.error('Invalid TEXINPUTS folder %s', x) + else: + Logs.error('Cannot resolve relative paths in TEXINPUTS %s', x) + + if self.type == 'latex': + if 'ps' in outs: + tsk = self.create_task('dvips', task.outputs, node.change_ext('.ps')) + tsk.env.env = dict(os.environ) + if 'pdf' in outs: + tsk = self.create_task('dvipdf', task.outputs, node.change_ext('.pdf')) + tsk.env.env = dict(os.environ) + elif self.type == 'pdflatex': + if 'ps' in outs: + self.create_task('pdf2ps', task.outputs, node.change_ext('.ps')) + self.source = [] + +def configure(self): + """ + Find the programs tex, latex and others without raising errors. 
+ """ + v = self.env + for p in 'tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps makeglossaries'.split(): + try: + self.find_program(p, var=p.upper()) + except self.errors.ConfigurationError: + pass + v.DVIPSFLAGS = '-Ppdf' + diff --git a/third_party/waf/waflib/Tools/vala.py b/third_party/waf/waflib/Tools/vala.py new file mode 100644 index 0000000..822ec50 --- /dev/null +++ b/third_party/waf/waflib/Tools/vala.py @@ -0,0 +1,355 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Ali Sabil, 2007 +# Radosław Szkodziński, 2010 + +""" +At this point, vala is still unstable, so do not expect +this tool to be too stable either (apis, etc) +""" + +import re +from waflib import Build, Context, Errors, Logs, Node, Options, Task, Utils +from waflib.TaskGen import extension, taskgen_method +from waflib.Configure import conf + +class valac(Task.Task): + """ + Compiles vala files + """ + #run_str = "${VALAC} ${VALAFLAGS}" # ideally + #vars = ['VALAC_VERSION'] + vars = ["VALAC", "VALAC_VERSION", "VALAFLAGS"] + ext_out = ['.h'] + + def run(self): + cmd = self.env.VALAC + self.env.VALAFLAGS + resources = getattr(self, 'vala_exclude', []) + cmd.extend([a.abspath() for a in self.inputs if a not in resources]) + ret = self.exec_command(cmd, cwd=self.vala_dir_node.abspath()) + + if ret: + return ret + + if self.generator.dump_deps_node: + self.generator.dump_deps_node.write('\n'.join(self.generator.packages)) + + return ret + +@taskgen_method +def init_vala_task(self): + """ + Initializes the vala task with the relevant data (acts as a constructor) + """ + self.profile = getattr(self, 'profile', 'gobject') + + self.packages = packages = Utils.to_list(getattr(self, 'packages', [])) + self.use = Utils.to_list(getattr(self, 'use', [])) + if packages and not self.use: + self.use = packages[:] # copy + + if self.profile == 'gobject': + if not 'GOBJECT' in self.use: + self.use.append('GOBJECT') + + def addflags(flags): + self.env.append_value('VALAFLAGS', flags) + + if self.profile: + addflags('--profile=%s' % self.profile) + + valatask = self.valatask + + # output directory + if hasattr(self, 'vala_dir'): + if isinstance(self.vala_dir, str): + valatask.vala_dir_node = self.path.get_bld().make_node(self.vala_dir) + try: + valatask.vala_dir_node.mkdir() + except OSError: + raise self.bld.fatal('Cannot create the vala dir %r' % valatask.vala_dir_node) + else: + valatask.vala_dir_node = self.vala_dir + else: + valatask.vala_dir_node = self.path.get_bld() + addflags('--directory=%s' % valatask.vala_dir_node.abspath()) + + if hasattr(self, 'thread'): + if self.profile == 'gobject': + if not 'GTHREAD' in self.use: + self.use.append('GTHREAD') + else: + #Vala doesn't have threading support for dova nor posix + Logs.warn('Profile %s means no threading support', self.profile) + self.thread = False + + if self.thread: + addflags('--thread') + + self.is_lib = 'cprogram' not in self.features + if self.is_lib: + addflags('--library=%s' % self.target) + + h_node = valatask.vala_dir_node.find_or_declare('%s.h' % self.target) + valatask.outputs.append(h_node) + addflags('--header=%s' % h_node.name) + + valatask.outputs.append(valatask.vala_dir_node.find_or_declare('%s.vapi' % self.target)) + + if getattr(self, 'gir', None): + gir_node = valatask.vala_dir_node.find_or_declare('%s.gir' % self.gir) + addflags('--gir=%s' % gir_node.name) + valatask.outputs.append(gir_node) + + self.vala_target_glib = getattr(self, 'vala_target_glib', getattr(Options.options, 'vala_target_glib', None)) + if self.vala_target_glib: + 
addflags('--target-glib=%s' % self.vala_target_glib) + + addflags(['--define=%s' % x for x in Utils.to_list(getattr(self, 'vala_defines', []))]) + + packages_private = Utils.to_list(getattr(self, 'packages_private', [])) + addflags(['--pkg=%s' % x for x in packages_private]) + + def _get_api_version(): + api_version = '1.0' + if hasattr(Context.g_module, 'API_VERSION'): + version = Context.g_module.API_VERSION.split(".") + if version[0] == "0": + api_version = "0." + version[1] + else: + api_version = version[0] + ".0" + return api_version + + self.includes = Utils.to_list(getattr(self, 'includes', [])) + valatask.install_path = getattr(self, 'install_path', '') + + valatask.vapi_path = getattr(self, 'vapi_path', '${DATAROOTDIR}/vala/vapi') + valatask.pkg_name = getattr(self, 'pkg_name', self.env.PACKAGE) + valatask.header_path = getattr(self, 'header_path', '${INCLUDEDIR}/%s-%s' % (valatask.pkg_name, _get_api_version())) + valatask.install_binding = getattr(self, 'install_binding', True) + + self.vapi_dirs = vapi_dirs = Utils.to_list(getattr(self, 'vapi_dirs', [])) + #includes = [] + + if hasattr(self, 'use'): + local_packages = Utils.to_list(self.use)[:] # make sure to have a copy + seen = [] + while len(local_packages) > 0: + package = local_packages.pop() + if package in seen: + continue + seen.append(package) + + # check if the package exists + try: + package_obj = self.bld.get_tgen_by_name(package) + except Errors.WafError: + continue + + # in practice the other task is already processed + # but this makes it explicit + package_obj.post() + package_name = package_obj.target + task = getattr(package_obj, 'valatask', None) + if task: + for output in task.outputs: + if output.name == package_name + ".vapi": + valatask.set_run_after(task) + if package_name not in packages: + packages.append(package_name) + if output.parent not in vapi_dirs: + vapi_dirs.append(output.parent) + if output.parent not in self.includes: + self.includes.append(output.parent) + + if hasattr(package_obj, 'use'): + lst = self.to_list(package_obj.use) + lst.reverse() + local_packages = [pkg for pkg in lst if pkg not in seen] + local_packages + + addflags(['--pkg=%s' % p for p in packages]) + + for vapi_dir in vapi_dirs: + if isinstance(vapi_dir, Node.Node): + v_node = vapi_dir + else: + v_node = self.path.find_dir(vapi_dir) + if not v_node: + Logs.warn('Unable to locate Vala API directory: %r', vapi_dir) + else: + addflags('--vapidir=%s' % v_node.abspath()) + + self.dump_deps_node = None + if self.is_lib and self.packages: + self.dump_deps_node = valatask.vala_dir_node.find_or_declare('%s.deps' % self.target) + valatask.outputs.append(self.dump_deps_node) + + if self.is_lib and valatask.install_binding: + headers_list = [o for o in valatask.outputs if o.suffix() == ".h"] + if headers_list: + self.install_vheader = self.add_install_files(install_to=valatask.header_path, install_from=headers_list) + + vapi_list = [o for o in valatask.outputs if (o.suffix() in (".vapi", ".deps"))] + if vapi_list: + self.install_vapi = self.add_install_files(install_to=valatask.vapi_path, install_from=vapi_list) + + gir_list = [o for o in valatask.outputs if o.suffix() == '.gir'] + if gir_list: + self.install_gir = self.add_install_files( + install_to=getattr(self, 'gir_path', '${DATAROOTDIR}/gir-1.0'), install_from=gir_list) + + if hasattr(self, 'vala_resources'): + nodes = self.to_nodes(self.vala_resources) + valatask.vala_exclude = getattr(valatask, 'vala_exclude', []) + nodes + valatask.inputs.extend(nodes) + for x in nodes: + 
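# each resource file was excluded from compilation above; it is only
+			# passed to valac through the --gresources flag
+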
addflags(['--gresources', x.abspath()])
+
+@extension('.vala', '.gs')
+def vala_file(self, node):
+	"""
+	Compile a vala file and bind the task to *self.valatask*. If an existing vala task is already set, add the node
+	to its inputs. The typical example is::
+
+		def build(bld):
+			bld.program(
+				packages = 'gtk+-2.0',
+				target = 'vala-gtk-example',
+				use = 'GTK GLIB',
+				source = 'vala-gtk-example.vala foo.vala',
+				vala_defines = ['DEBUG'] # adds --define=<xyz> values to the command-line
+
+				# the following arguments are for libraries
+				#gir = 'hello-1.0',
+				#gir_path = '/tmp',
+				#vapi_path = '/tmp',
+				#pkg_name = 'hello'
+				# disable installing of gir, vapi and header
+				#install_binding = False
+
+				# profile = 'xyz' # adds --profile=<xyz> (the Vala profile, 'gobject' by default)
+				# thread = True, # adds --thread; only effective with the 'gobject' profile
+				# vala_target_glib = 'xyz' # adds --target-glib=<xyz>, can be given through the command-line option --vala-target-glib=<xyz>
+			)
+
+
+	:param node: vala file
+	:type node: :py:class:`waflib.Node.Node`
+	"""
+
+	try:
+		valatask = self.valatask
+	except AttributeError:
+		valatask = self.valatask = self.create_task('valac')
+		self.init_vala_task()
+
+	valatask.inputs.append(node)
+	name = node.name[:node.name.rfind('.')] + '.c'
+	c_node = valatask.vala_dir_node.find_or_declare(name)
+	valatask.outputs.append(c_node)
+	self.source.append(c_node)
+
+@extension('.vapi')
+def vapi_file(self, node):
+	try:
+		valatask = self.valatask
+	except AttributeError:
+		valatask = self.valatask = self.create_task('valac')
+		self.init_vala_task()
+	valatask.inputs.append(node)
+
+@conf
+def find_valac(self, valac_name, min_version):
+	"""
+	Find the valac program, and execute it to store the version
+	number in *conf.env.VALAC_VERSION*
+
+	:param valac_name: program name
+	:type valac_name: string or list of string
+	:param min_version: minimum version acceptable
+	:type min_version: tuple of int
+	"""
+	valac = self.find_program(valac_name, var='VALAC')
+	try:
+		output = self.cmd_and_log(valac + ['--version'])
+	except Errors.WafError:
+		valac_version = None
+	else:
+		ver = re.search(r'\d+\.\d+\.\d+', output).group().split('.')
+		valac_version = tuple([int(x) for x in ver])
+
+	self.msg('Checking for %s version >= %r' % (valac_name, min_version),
+		valac_version, valac_version and valac_version >= min_version)
+	if valac and valac_version < min_version:
+		self.fatal("%s version %r is too old, need >= %r" % (valac_name, valac_version, min_version))
+
+	self.env.VALAC_VERSION = valac_version
+	return valac
+
+@conf
+def check_vala(self, min_version=(0,8,0), branch=None):
+	"""
+	Check that the vala compiler from a given branch exists and is at least
+	the given version.
+
+	:param min_version: minimum version acceptable (0.8.0)
+	:type min_version: tuple
+	:param branch: first part of the version number, in case a snapshot is used (0, 8)
+	:type branch: tuple of int
+	"""
+	if self.env.VALA_MINVER:
+		min_version = self.env.VALA_MINVER
+	if self.env.VALA_MINVER_BRANCH:
+		branch = self.env.VALA_MINVER_BRANCH
+	if not branch:
+		branch = min_version[:2]
+	try:
+		find_valac(self, 'valac-%d.%d' % (branch[0], branch[1]), min_version)
+	except self.errors.ConfigurationError:
+		find_valac(self, 'valac', min_version)
+
+@conf
+def check_vala_deps(self):
+	"""
+	Load the gobject and gthread packages if they are missing.
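+
+	A sketch of what the checks below amount to (same arguments as in the body)::
+
+		conf.check_cfg(package='gobject-2.0', uselib_store='GOBJECT', args='--cflags --libs')
+		conf.check_cfg(package='gthread-2.0', uselib_store='GTHREAD', args='--cflags --libs')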
+ """ + if not self.env.HAVE_GOBJECT: + pkg_args = {'package': 'gobject-2.0', + 'uselib_store': 'GOBJECT', + 'args': '--cflags --libs'} + if getattr(Options.options, 'vala_target_glib', None): + pkg_args['atleast_version'] = Options.options.vala_target_glib + self.check_cfg(**pkg_args) + + if not self.env.HAVE_GTHREAD: + pkg_args = {'package': 'gthread-2.0', + 'uselib_store': 'GTHREAD', + 'args': '--cflags --libs'} + if getattr(Options.options, 'vala_target_glib', None): + pkg_args['atleast_version'] = Options.options.vala_target_glib + self.check_cfg(**pkg_args) + +def configure(self): + """ + Use the following to enforce minimum vala version:: + + def configure(conf): + conf.env.VALA_MINVER = (0, 10, 0) + conf.load('vala') + """ + self.load('gnu_dirs') + self.check_vala_deps() + self.check_vala() + self.add_os_flags('VALAFLAGS') + self.env.append_unique('VALAFLAGS', ['-C']) + +def options(opt): + """ + Load the :py:mod:`waflib.Tools.gnu_dirs` tool and add the ``--vala-target-glib`` command-line option + """ + opt.load('gnu_dirs') + valaopts = opt.add_option_group('Vala Compiler Options') + valaopts.add_option('--vala-target-glib', default=None, + dest='vala_target_glib', metavar='MAJOR.MINOR', + help='Target version of glib for Vala GObject code generation') + diff --git a/third_party/waf/waflib/Tools/waf_unit_test.py b/third_party/waf/waflib/Tools/waf_unit_test.py new file mode 100644 index 0000000..8cff89b --- /dev/null +++ b/third_party/waf/waflib/Tools/waf_unit_test.py @@ -0,0 +1,302 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Carlos Rafael Giani, 2006 +# Thomas Nagy, 2010-2018 (ita) + +""" +Unit testing system for C/C++/D and interpreted languages providing test execution: + +* in parallel, by using ``waf -j`` +* partial (only the tests that have changed) or full (by using ``waf --alltests``) + +The tests are declared by adding the **test** feature to programs:: + + def options(opt): + opt.load('compiler_cxx waf_unit_test') + def configure(conf): + conf.load('compiler_cxx waf_unit_test') + def build(bld): + bld(features='cxx cxxprogram test', source='main.cpp', target='app') + # or + bld.program(features='test', source='main2.cpp', target='app2') + +When the build is executed, the program 'test' will be built and executed without arguments. +The success/failure is detected by looking at the return code. The status and the standard output/error +are stored on the build context. + +The results can be displayed by registering a callback function. Here is how to call +the predefined callback:: + + def build(bld): + bld(features='cxx cxxprogram test', source='main.c', target='app') + from waflib.Tools import waf_unit_test + bld.add_post_fun(waf_unit_test.summary) + +By passing --dump-test-scripts the build outputs corresponding python files +(with extension _run.py) that are useful for debugging purposes. +""" + +import os, shlex, sys +from waflib.TaskGen import feature, after_method, taskgen_method +from waflib import Utils, Task, Logs, Options +from waflib.Tools import ccroot +testlock = Utils.threading.Lock() + +SCRIPT_TEMPLATE = """#! %(python)s +import subprocess, sys +cmd = %(cmd)r +# if you want to debug with gdb: +#cmd = ['gdb', '-args'] + cmd +env = %(env)r +status = subprocess.call(cmd, env=env, cwd=%(cwd)r, shell=isinstance(cmd, str)) +sys.exit(status) +""" + +@taskgen_method +def handle_ut_cwd(self, key): + """ + Task generator method, used internally to limit code duplication. + This method may disappear anytime. 
+ """ + cwd = getattr(self, key, None) + if cwd: + if isinstance(cwd, str): + # we want a Node instance + if os.path.isabs(cwd): + self.ut_cwd = self.bld.root.make_node(cwd) + else: + self.ut_cwd = self.path.make_node(cwd) + +@feature('test_scripts') +def make_interpreted_test(self): + """Create interpreted unit tests.""" + for x in ['test_scripts_source', 'test_scripts_template']: + if not hasattr(self, x): + Logs.warn('a test_scripts taskgen i missing %s' % x) + return + + self.ut_run, lst = Task.compile_fun(self.test_scripts_template, shell=getattr(self, 'test_scripts_shell', False)) + + script_nodes = self.to_nodes(self.test_scripts_source) + for script_node in script_nodes: + tsk = self.create_task('utest', [script_node]) + tsk.vars = lst + tsk.vars + tsk.env['SCRIPT'] = script_node.path_from(tsk.get_cwd()) + + self.handle_ut_cwd('test_scripts_cwd') + + env = getattr(self, 'test_scripts_env', None) + if env: + self.ut_env = env + else: + self.ut_env = dict(os.environ) + + paths = getattr(self, 'test_scripts_paths', {}) + for (k,v) in paths.items(): + p = self.ut_env.get(k, '').split(os.pathsep) + if isinstance(v, str): + v = v.split(os.pathsep) + self.ut_env[k] = os.pathsep.join(p + v) + self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env]) + +@feature('test') +@after_method('apply_link', 'process_use') +def make_test(self): + """Create the unit test task. There can be only one unit test task by task generator.""" + if not getattr(self, 'link_task', None): + return + + tsk = self.create_task('utest', self.link_task.outputs) + if getattr(self, 'ut_str', None): + self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) + tsk.vars = tsk.vars + lst + self.env.append_value('UT_DEPS', self.ut_str) + + self.handle_ut_cwd('ut_cwd') + + if not hasattr(self, 'ut_paths'): + paths = [] + for x in self.tmp_use_sorted: + try: + y = self.bld.get_tgen_by_name(x).link_task + except AttributeError: + pass + else: + if not isinstance(y, ccroot.stlink_task): + paths.append(y.outputs[0].parent.abspath()) + self.ut_paths = os.pathsep.join(paths) + os.pathsep + + if not hasattr(self, 'ut_env'): + self.ut_env = dct = dict(os.environ) + def add_path(var): + dct[var] = self.ut_paths + dct.get(var,'') + if Utils.is_win32: + add_path('PATH') + elif Utils.unversioned_sys_platform() == 'darwin': + add_path('DYLD_LIBRARY_PATH') + add_path('LD_LIBRARY_PATH') + else: + add_path('LD_LIBRARY_PATH') + + if not hasattr(self, 'ut_cmd'): + self.ut_cmd = getattr(Options.options, 'testcmd', False) + + self.env.append_value('UT_DEPS', str(self.ut_cmd)) + self.env.append_value('UT_DEPS', self.ut_paths) + self.env.append_value('UT_DEPS', ['%r%r' % (key, self.ut_env[key]) for key in self.ut_env]) + +@taskgen_method +def add_test_results(self, tup): + """Override and return tup[1] to interrupt the build immediately if a test does not run""" + Logs.debug("ut: %r", tup) + try: + self.utest_results.append(tup) + except AttributeError: + self.utest_results = [tup] + try: + self.bld.utest_results.append(tup) + except AttributeError: + self.bld.utest_results = [tup] + +@Task.deep_inputs +class utest(Task.Task): + """ + Execute a unit test + """ + color = 'PINK' + after = ['vnum', 'inst'] + vars = ['UT_DEPS'] + + def runnable_status(self): + """ + Always execute the task if `waf --alltests` was used or no + tests if ``waf --notests`` was used + """ + if getattr(Options.options, 'no_tests', False): + return Task.SKIP_ME + + ret = super(utest, self).runnable_status() + 
if ret == Task.SKIP_ME: + if getattr(Options.options, 'all_tests', False): + return Task.RUN_ME + return ret + + def get_test_env(self): + """ + In general, tests may require any library built anywhere in the project. + Override this method if fewer paths are needed + """ + return self.generator.ut_env + + def post_run(self): + super(utest, self).post_run() + if getattr(Options.options, 'clear_failed_tests', False) and self.waf_unit_test_results[1]: + self.generator.bld.task_sigs[self.uid()] = None + + def run(self): + """ + Execute the test. The execution is always successful, and the results + are stored on ``self.generator.bld.utest_results`` for postprocessing. + + Override ``add_test_results`` to interrupt the build + """ + if hasattr(self.generator, 'ut_run'): + return self.generator.ut_run(self) + + self.ut_exec = getattr(self.generator, 'ut_exec', [self.inputs[0].abspath()]) + ut_cmd = getattr(self.generator, 'ut_cmd', False) + if ut_cmd: + self.ut_exec = shlex.split(ut_cmd % Utils.shell_escape(self.ut_exec)) + + return self.exec_command(self.ut_exec) + + def exec_command(self, cmd, **kw): + self.generator.bld.log_command(cmd, kw) + if getattr(Options.options, 'dump_test_scripts', False): + script_code = SCRIPT_TEMPLATE % { + 'python': sys.executable, + 'env': self.get_test_env(), + 'cwd': self.get_cwd().abspath(), + 'cmd': cmd + } + script_file = self.inputs[0].abspath() + '_run.py' + Utils.writef(script_file, script_code, encoding='utf-8') + os.chmod(script_file, Utils.O755) + if Logs.verbose > 1: + Logs.info('Test debug file written as %r' % script_file) + + proc = Utils.subprocess.Popen(cmd, cwd=self.get_cwd().abspath(), env=self.get_test_env(), + stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE, shell=isinstance(cmd,str)) + (stdout, stderr) = proc.communicate() + self.waf_unit_test_results = tup = (self.inputs[0].abspath(), proc.returncode, stdout, stderr) + testlock.acquire() + try: + return self.generator.add_test_results(tup) + finally: + testlock.release() + + def get_cwd(self): + return getattr(self.generator, 'ut_cwd', self.inputs[0].parent) + +def summary(bld): + """ + Display an execution summary:: + + def build(bld): + bld(features='cxx cxxprogram test', source='main.c', target='app') + from waflib.Tools import waf_unit_test + bld.add_post_fun(waf_unit_test.summary) + """ + lst = getattr(bld, 'utest_results', []) + if lst: + Logs.pprint('CYAN', 'execution summary') + + total = len(lst) + tfail = len([x for x in lst if x[1]]) + + Logs.pprint('GREEN', ' tests that pass %d/%d' % (total-tfail, total)) + for (f, code, out, err) in lst: + if not code: + Logs.pprint('GREEN', ' %s' % f) + + Logs.pprint('GREEN' if tfail == 0 else 'RED', ' tests that fail %d/%d' % (tfail, total)) + for (f, code, out, err) in lst: + if code: + Logs.pprint('RED', ' %s' % f) + +def set_exit_code(bld): + """ + If any of the tests fail waf will exit with that exit code. + This is useful if you have an automated build system which need + to report on errors from the tests. 
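+	(Since ``bld.fatal`` raises an error, the overall waf exit status becomes non-zero.)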
+ You may use it like this: + + def build(bld): + bld(features='cxx cxxprogram test', source='main.c', target='app') + from waflib.Tools import waf_unit_test + bld.add_post_fun(waf_unit_test.set_exit_code) + """ + lst = getattr(bld, 'utest_results', []) + for (f, code, out, err) in lst: + if code: + msg = [] + if out: + msg.append('stdout:%s%s' % (os.linesep, out.decode('utf-8'))) + if err: + msg.append('stderr:%s%s' % (os.linesep, err.decode('utf-8'))) + bld.fatal(os.linesep.join(msg)) + + +def options(opt): + """ + Provide the ``--alltests``, ``--notests`` and ``--testcmd`` command-line options. + """ + opt.add_option('--notests', action='store_true', default=False, help='Exec no unit tests', dest='no_tests') + opt.add_option('--alltests', action='store_true', default=False, help='Exec all unit tests', dest='all_tests') + opt.add_option('--clear-failed', action='store_true', default=False, + help='Force failed unit tests to run again next time', dest='clear_failed_tests') + opt.add_option('--testcmd', action='store', default=False, dest='testcmd', + help='Run the unit tests using the test-cmd string example "--testcmd="valgrind --error-exitcode=1 %s" to run under valgrind') + opt.add_option('--dump-test-scripts', action='store_true', default=False, + help='Create python scripts to help debug tests', dest='dump_test_scripts') + diff --git a/third_party/waf/waflib/Tools/winres.py b/third_party/waf/waflib/Tools/winres.py new file mode 100644 index 0000000..73c0e95 --- /dev/null +++ b/third_party/waf/waflib/Tools/winres.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Brant Young, 2007 + +"Process *.rc* files for C/C++: X{.rc -> [.res|.rc.o]}" + +import os +import re +from waflib import Task +from waflib.TaskGen import extension +from waflib.Tools import c_preproc +from waflib import Utils + +@extension('.rc') +def rc_file(self, node): + """ + Binds the .rc extension to a winrc task + """ + obj_ext = '.rc.o' + if self.env.WINRC_TGT_F == '/fo': + obj_ext = '.res' + rctask = self.create_task('winrc', node, node.change_ext(obj_ext)) + try: + self.compiled_tasks.append(rctask) + except AttributeError: + self.compiled_tasks = [rctask] + +re_lines = re.compile( + r'(?:^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*?)\s*$)|'\ + r'(?:^\w+[ \t]*(ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)[ \t]*(.*?)\s*$)', + re.IGNORECASE | re.MULTILINE) + +class rc_parser(c_preproc.c_parser): + """ + Calculates dependencies in .rc files + """ + def filter_comments(self, node): + """ + Overrides :py:meth:`waflib.Tools.c_preproc.c_parser.filter_comments` + """ + code = node.read() + if c_preproc.use_trigraphs: + for (a, b) in c_preproc.trig_def: + code = code.split(a).join(b) + code = c_preproc.re_nl.sub('', code) + code = c_preproc.re_cpp.sub(c_preproc.repl, code) + ret = [] + for m in re.finditer(re_lines, code): + if m.group(2): + ret.append((m.group(2), m.group(3))) + else: + ret.append(('include', m.group(5))) + return ret + +class winrc(Task.Task): + """ + Compiles resource files + """ + run_str = '${WINRC} ${WINRCFLAGS} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${WINRC_TGT_F} ${TGT} ${WINRC_SRC_F} ${SRC}' + color = 'BLUE' + def scan(self): + tmp = rc_parser(self.generator.includes_nodes) + tmp.start(self.inputs[0], self.env) + return (tmp.nodes, tmp.names) + + def exec_command(self, cmd, **kw): + if self.env.WINRC_TGT_F == '/fo': + # Since winres include paths may contain spaces, they do not fit in + # response files and 
are best passed as environment variables + replace_cmd = [] + incpaths = [] + while cmd: + # filter include path flags + flag = cmd.pop(0) + if flag.upper().startswith('/I'): + if len(flag) == 2: + incpaths.append(cmd.pop(0)) + else: + incpaths.append(flag[2:]) + else: + replace_cmd.append(flag) + cmd = replace_cmd + if incpaths: + # append to existing environment variables in INCLUDE + env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) + pre_includes = env.get('INCLUDE', '') + env['INCLUDE'] = pre_includes + os.pathsep + os.pathsep.join(incpaths) + + return super(winrc, self).exec_command(cmd, **kw) + + def quote_flag(self, flag): + if self.env.WINRC_TGT_F == '/fo': + # winres does not support quotes around flags in response files + return flag + + return super(winrc, self).quote_flag(flag) + + +def configure(conf): + """ + Detects the programs RC or windres, depending on the C/C++ compiler in use + """ + v = conf.env + if not v.WINRC: + if v.CC_NAME == 'msvc': + conf.find_program('RC', var='WINRC', path_list=v.PATH) + v.WINRC_TGT_F = '/fo' + v.WINRC_SRC_F = '' + else: + conf.find_program('windres', var='WINRC', path_list=v.PATH) + v.WINRC_TGT_F = '-o' + v.WINRC_SRC_F = '-i' + diff --git a/third_party/waf/waflib/Tools/xlc.py b/third_party/waf/waflib/Tools/xlc.py new file mode 100644 index 0000000..134dd41 --- /dev/null +++ b/third_party/waf/waflib/Tools/xlc.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Ralf Habacker, 2006 (rh) +# Yinon Ehrlich, 2009 +# Michael Kuhn, 2009 + +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +@conf +def find_xlc(conf): + """ + Detects the Aix C compiler + """ + cc = conf.find_program(['xlc_r', 'xlc'], var='CC') + conf.get_xlc_version(cc) + conf.env.CC_NAME = 'xlc' + +@conf +def xlc_common_flags(conf): + """ + Flags required for executing the Aix C compiler + """ + v = conf.env + + v.CC_SRC_F = [] + v.CC_TGT_F = ['-c', '-o'] + + if not v.LINK_CC: + v.LINK_CC = v.CC + + v.CCLNK_SRC_F = [] + v.CCLNK_TGT_F = ['-o'] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + v.RPATH_ST = '-Wl,-rpath,%s' + + v.SONAME_ST = [] + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + + v.LINKFLAGS_cprogram = ['-Wl,-brtl'] + v.cprogram_PATTERN = '%s' + + v.CFLAGS_cshlib = ['-fPIC'] + v.LINKFLAGS_cshlib = ['-G', '-Wl,-brtl,-bexpfull'] + v.cshlib_PATTERN = 'lib%s.so' + + v.LINKFLAGS_cstlib = [] + v.cstlib_PATTERN = 'lib%s.a' + +def configure(conf): + conf.find_xlc() + conf.find_ar() + conf.xlc_common_flags() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/Tools/xlcxx.py b/third_party/waf/waflib/Tools/xlcxx.py new file mode 100644 index 0000000..76aa59b --- /dev/null +++ b/third_party/waf/waflib/Tools/xlcxx.py @@ -0,0 +1,65 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2018 (ita) +# Ralf Habacker, 2006 (rh) +# Yinon Ehrlich, 2009 +# Michael Kuhn, 2009 + +from waflib.Tools import ccroot, ar +from waflib.Configure import conf + +@conf +def find_xlcxx(conf): + """ + Detects the Aix C++ compiler + """ + cxx = conf.find_program(['xlc++_r', 'xlc++'], var='CXX') + conf.get_xlc_version(cxx) + conf.env.CXX_NAME = 'xlc++' + +@conf +def xlcxx_common_flags(conf): + """ + Flags required for executing the Aix C++ compiler + """ + v = conf.env + + v.CXX_SRC_F = [] + v.CXX_TGT_F = ['-c', 
'-o'] + + if not v.LINK_CXX: + v.LINK_CXX = v.CXX + + v.CXXLNK_SRC_F = [] + v.CXXLNK_TGT_F = ['-o'] + v.CPPPATH_ST = '-I%s' + v.DEFINES_ST = '-D%s' + + v.LIB_ST = '-l%s' # template for adding libs + v.LIBPATH_ST = '-L%s' # template for adding libpaths + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-L%s' + v.RPATH_ST = '-Wl,-rpath,%s' + + v.SONAME_ST = [] + v.SHLIB_MARKER = [] + v.STLIB_MARKER = [] + + v.LINKFLAGS_cxxprogram= ['-Wl,-brtl'] + v.cxxprogram_PATTERN = '%s' + + v.CXXFLAGS_cxxshlib = ['-fPIC'] + v.LINKFLAGS_cxxshlib = ['-G', '-Wl,-brtl,-bexpfull'] + v.cxxshlib_PATTERN = 'lib%s.so' + + v.LINKFLAGS_cxxstlib = [] + v.cxxstlib_PATTERN = 'lib%s.a' + +def configure(conf): + conf.find_xlcxx() + conf.find_ar() + conf.xlcxx_common_flags() + conf.cxx_load_tools() + conf.cxx_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/Utils.py b/third_party/waf/waflib/Utils.py new file mode 100644 index 0000000..ea0f7a9 --- /dev/null +++ b/third_party/waf/waflib/Utils.py @@ -0,0 +1,1053 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) + +""" +Utilities and platform-specific fixes + +The portability fixes try to provide a consistent behavior of the Waf API +through Python versions 2.5 to 3.X and across different platforms (win32, linux, etc) +""" + +from __future__ import with_statement + +import atexit, os, sys, errno, inspect, re, datetime, platform, base64, signal, functools, time, shlex + +try: + import cPickle +except ImportError: + import pickle as cPickle + +# leave this +if os.name == 'posix' and sys.version_info[0] < 3: + try: + import subprocess32 as subprocess + except ImportError: + import subprocess +else: + import subprocess + +try: + TimeoutExpired = subprocess.TimeoutExpired +except AttributeError: + class TimeoutExpired(Exception): + pass + +from collections import deque, defaultdict + +try: + import _winreg as winreg +except ImportError: + try: + import winreg + except ImportError: + winreg = None + +from waflib import Errors + +try: + from hashlib import md5 +except ImportError: + try: + from hashlib import sha1 as md5 + except ImportError: + # never fail to enable potential fixes from another module + pass +else: + try: + md5().digest() + except ValueError: + # Fips? #2213 + from hashlib import sha1 as md5 + +try: + import threading +except ImportError: + if not 'JOBS' in os.environ: + # no threading :-( + os.environ['JOBS'] = '1' + + class threading(object): + """ + A fake threading class for platforms lacking the threading module. + Use ``waf -j1`` on those platforms + """ + pass + class Lock(object): + """Fake Lock class""" + def acquire(self): + pass + def release(self): + pass + threading.Lock = threading.Thread = Lock + +SIG_NIL = 'SIG_NIL_SIG_NIL_'.encode() +"""Arbitrary null value for hashes. 
Modify this value according to the hash function in use""" + +O644 = 420 +"""Constant representing the permissions for regular files (0644 raises a syntax error on python 3)""" + +O755 = 493 +"""Constant representing the permissions for executable files (0755 raises a syntax error on python 3)""" + +rot_chr = ['\\', '|', '/', '-'] +"List of characters to use when displaying the throbber (progress bar)" + +rot_idx = 0 +"Index of the current throbber character (progress bar)" + +class ordered_iter_dict(dict): + """Ordered dictionary that provides iteration from the most recently inserted keys first""" + def __init__(self, *k, **kw): + self.lst = deque() + dict.__init__(self, *k, **kw) + def clear(self): + dict.clear(self) + self.lst = deque() + def __setitem__(self, key, value): + if key in dict.keys(self): + self.lst.remove(key) + dict.__setitem__(self, key, value) + self.lst.append(key) + def __delitem__(self, key): + dict.__delitem__(self, key) + try: + self.lst.remove(key) + except ValueError: + pass + def __iter__(self): + return reversed(self.lst) + def keys(self): + return reversed(self.lst) + +class lru_node(object): + """ + Used by :py:class:`waflib.Utils.lru_cache` + """ + __slots__ = ('next', 'prev', 'key', 'val') + def __init__(self): + self.next = self + self.prev = self + self.key = None + self.val = None + +class lru_cache(object): + """ + A simple least-recently used cache with lazy allocation + """ + __slots__ = ('maxlen', 'table', 'head') + def __init__(self, maxlen=100): + self.maxlen = maxlen + """ + Maximum amount of elements in the cache + """ + self.table = {} + """ + Mapping key-value + """ + self.head = lru_node() + self.head.next = self.head + self.head.prev = self.head + + def __getitem__(self, key): + node = self.table[key] + # assert(key==node.key) + if node is self.head: + return node.val + + # detach the node found + node.prev.next = node.next + node.next.prev = node.prev + + # replace the head + node.next = self.head.next + node.prev = self.head + self.head = node.next.prev = node.prev.next = node + + return node.val + + def __setitem__(self, key, val): + if key in self.table: + # update the value for an existing key + node = self.table[key] + node.val = val + self.__getitem__(key) + else: + if len(self.table) < self.maxlen: + # the very first item is unused until the maximum is reached + node = lru_node() + node.prev = self.head + node.next = self.head.next + node.prev.next = node.next.prev = node + else: + node = self.head = self.head.next + try: + # that's another key + del self.table[node.key] + except KeyError: + pass + + node.key = key + node.val = val + self.table[key] = node + +class lazy_generator(object): + def __init__(self, fun, params): + self.fun = fun + self.params = params + + def __iter__(self): + return self + + def __next__(self): + try: + it = self.it + except AttributeError: + it = self.it = self.fun(*self.params) + return next(it) + + next = __next__ + +is_win32 = os.sep == '\\' or sys.platform == 'win32' or os.name == 'nt' # msys2 +""" +Whether this system is a Windows series +""" + +def readf(fname, m='r', encoding='latin-1'): + """ + Reads an entire file into a string. 
See also :py:meth:`waflib.Node.Node.readf`:: + + def build(ctx): + from waflib import Utils + txt = Utils.readf(self.path.find_node('wscript').abspath()) + txt = ctx.path.find_node('wscript').read() + + :type fname: string + :param fname: Path to file + :type m: string + :param m: Open mode + :type encoding: string + :param encoding: encoding value, only used for python 3 + :rtype: string + :return: Content of the file + """ + + if sys.hexversion > 0x3000000 and not 'b' in m: + m += 'b' + with open(fname, m) as f: + txt = f.read() + if encoding: + txt = txt.decode(encoding) + else: + txt = txt.decode() + else: + with open(fname, m) as f: + txt = f.read() + return txt + +def writef(fname, data, m='w', encoding='latin-1'): + """ + Writes an entire file from a string. + See also :py:meth:`waflib.Node.Node.writef`:: + + def build(ctx): + from waflib import Utils + txt = Utils.writef(self.path.make_node('i_like_kittens').abspath(), 'some data') + self.path.make_node('i_like_kittens').write('some data') + + :type fname: string + :param fname: Path to file + :type data: string + :param data: The contents to write to the file + :type m: string + :param m: Open mode + :type encoding: string + :param encoding: encoding value, only used for python 3 + """ + if sys.hexversion > 0x3000000 and not 'b' in m: + data = data.encode(encoding) + m += 'b' + with open(fname, m) as f: + f.write(data) + +def h_file(fname): + """ + Computes a hash value for a file by using md5. Use the md5_tstamp + extension to get faster build hashes if necessary. + + :type fname: string + :param fname: path to the file to hash + :return: hash of the file contents + :rtype: string or bytes + """ + m = md5() + with open(fname, 'rb') as f: + while fname: + fname = f.read(200000) + m.update(fname) + return m.digest() + +def readf_win32(f, m='r', encoding='latin-1'): + flags = os.O_NOINHERIT | os.O_RDONLY + if 'b' in m: + flags |= os.O_BINARY + if '+' in m: + flags |= os.O_RDWR + try: + fd = os.open(f, flags) + except OSError: + raise IOError('Cannot read from %r' % f) + + if sys.hexversion > 0x3000000 and not 'b' in m: + m += 'b' + with os.fdopen(fd, m) as f: + txt = f.read() + if encoding: + txt = txt.decode(encoding) + else: + txt = txt.decode() + else: + with os.fdopen(fd, m) as f: + txt = f.read() + return txt + +def writef_win32(f, data, m='w', encoding='latin-1'): + if sys.hexversion > 0x3000000 and not 'b' in m: + data = data.encode(encoding) + m += 'b' + flags = os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT + if 'b' in m: + flags |= os.O_BINARY + if '+' in m: + flags |= os.O_RDWR + try: + fd = os.open(f, flags) + except OSError: + raise OSError('Cannot write to %r' % f) + with os.fdopen(fd, m) as f: + f.write(data) + +def h_file_win32(fname): + try: + fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT) + except OSError: + raise OSError('Cannot read from %r' % fname) + m = md5() + with os.fdopen(fd, 'rb') as f: + while fname: + fname = f.read(200000) + m.update(fname) + return m.digest() + +# always save these +readf_unix = readf +writef_unix = writef +h_file_unix = h_file +if hasattr(os, 'O_NOINHERIT') and sys.hexversion < 0x3040000: + # replace the default functions + readf = readf_win32 + writef = writef_win32 + h_file = h_file_win32 + +try: + x = ''.encode('hex') +except LookupError: + import binascii + def to_hex(s): + ret = binascii.hexlify(s) + if not isinstance(ret, str): + ret = ret.decode('utf-8') + return ret +else: + def to_hex(s): + return s.encode('hex') + +to_hex.__doc__ = """ +Return the 
hexadecimal representation of a string + +:param s: string to convert +:type s: string +""" + +def listdir_win32(s): + """ + Lists the contents of a folder in a portable manner. + On Win32, returns the list of drive letters: ['C:', 'X:', 'Z:'] when an empty string is given. + + :type s: string + :param s: a string, which can be empty on Windows + """ + if not s: + try: + import ctypes + except ImportError: + # there is nothing much we can do + return [x + ':\\' for x in 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'] + else: + dlen = 4 # length of "?:\\x00" + maxdrives = 26 + buf = ctypes.create_string_buffer(maxdrives * dlen) + ndrives = ctypes.windll.kernel32.GetLogicalDriveStringsA(maxdrives*dlen, ctypes.byref(buf)) + return [ str(buf.raw[4*i:4*i+2].decode('ascii')) for i in range(int(ndrives/dlen)) ] + + if len(s) == 2 and s[1] == ":": + s += os.sep + + if not os.path.isdir(s): + e = OSError('%s is not a directory' % s) + e.errno = errno.ENOENT + raise e + return os.listdir(s) + +listdir = os.listdir +if is_win32: + listdir = listdir_win32 + +def num2ver(ver): + """ + Converts a string, tuple or version number into an integer. The number is supposed to have at most 4 digits:: + + from waflib.Utils import num2ver + num2ver('1.3.2') == num2ver((1,3,2)) == num2ver((1,3,2,0)) + + :type ver: string or tuple of numbers + :param ver: a version number + """ + if isinstance(ver, str): + ver = tuple(ver.split('.')) + if isinstance(ver, tuple): + ret = 0 + for i in range(4): + if i < len(ver): + ret += 256**(3 - i) * int(ver[i]) + return ret + return ver + +def to_list(val): + """ + Converts a string argument to a list by splitting it by spaces. + Returns the object if not a string:: + + from waflib.Utils import to_list + lst = to_list('a b c d') + + :param val: list of string or space-separated string + :rtype: list + :return: Argument converted to list + """ + if isinstance(val, str): + return val.split() + else: + return val + +def console_encoding(): + try: + import ctypes + except ImportError: + pass + else: + try: + codepage = ctypes.windll.kernel32.GetConsoleCP() + except AttributeError: + pass + else: + if codepage: + if 65001 == codepage and sys.version_info < (3, 3): + return 'utf-8' + return 'cp%d' % codepage + return sys.stdout.encoding or ('cp1252' if is_win32 else 'latin-1') + +def split_path_unix(path): + return path.split('/') + +def split_path_cygwin(path): + if path.startswith('//'): + ret = path.split('/')[2:] + ret[0] = '/' + ret[0] + return ret + return path.split('/') + +re_sp = re.compile('[/\\\\]+') +def split_path_win32(path): + if path.startswith('\\\\'): + ret = re_sp.split(path)[1:] + ret[0] = '\\\\' + ret[0] + if ret[0] == '\\\\?': + return ret[1:] + return ret + return re_sp.split(path) + +msysroot = None +def split_path_msys(path): + if path.startswith(('/', '\\')) and not path.startswith(('//', '\\\\')): + # msys paths can be in the form /usr/bin + global msysroot + if not msysroot: + # msys has python 2.7 or 3, so we can use this + msysroot = subprocess.check_output(['cygpath', '-w', '/']).decode(sys.stdout.encoding or 'latin-1') + msysroot = msysroot.strip() + path = os.path.normpath(msysroot + os.sep + path) + return split_path_win32(path) + +if sys.platform == 'cygwin': + split_path = split_path_cygwin +elif is_win32: + # Consider this an MSYSTEM environment if $MSYSTEM is set and python + # reports is executable from a unix like path on a windows host. 
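+	# e.g. an MSYS2 python may report sys.executable as /usr/bin/python
+	# even though the host platform is Windows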
+	if os.environ.get('MSYSTEM') and sys.executable.startswith('/'):
+		split_path = split_path_msys
+	else:
+		split_path = split_path_win32
+else:
+	split_path = split_path_unix
+
+split_path.__doc__ = """
+Splits a path by / or \\; do not confuse this function with ``os.path.split``
+
+:type path: string
+:param path: path to split
+:return: list of string
+"""
+
+def check_dir(path):
+	"""
+	Ensures that a directory exists (similar to ``mkdir -p``).
+
+	:type path: string
+	:param path: Path to directory
+	:raises: :py:class:`waflib.Errors.WafError` if the folder cannot be added.
+	"""
+	if not os.path.isdir(path):
+		try:
+			os.makedirs(path)
+		except OSError as e:
+			if not os.path.isdir(path):
+				raise Errors.WafError('Cannot create the folder %r' % path, ex=e)
+
+def check_exe(name, env=None):
+	"""
+	Ensures that a program exists
+
+	:type name: string
+	:param name: path to the program
+	:param env: configuration object
+	:type env: :py:class:`waflib.ConfigSet.ConfigSet`
+	:return: path of the program or None
+	:raises: ValueError if the program name is empty
+	"""
+	if not name:
+		raise ValueError('Cannot execute an empty string!')
+	def is_exe(fpath):
+		return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
+
+	fpath, fname = os.path.split(name)
+	if fpath and is_exe(name):
+		return os.path.abspath(name)
+	else:
+		env = env or os.environ
+		for path in env['PATH'].split(os.pathsep):
+			path = path.strip('"')
+			exe_file = os.path.join(path, name)
+			if is_exe(exe_file):
+				return os.path.abspath(exe_file)
+	return None
+
+def def_attrs(cls, **kw):
+	"""
+	Sets default attributes on a class instance
+
+	:type cls: class
+	:param cls: the class to update the given attributes in.
+	:type kw: dict
+	:param kw: dictionary of attributes names and values.
+	"""
+	for k, v in kw.items():
+		if not hasattr(cls, k):
+			setattr(cls, k, v)
+
+def quote_define_name(s):
+	"""
+	Converts a string into an identifier suitable for C defines.
+
+	:type s: string
+	:param s: String to convert
+	:rtype: string
+	:return: Identifier suitable for C defines
+	"""
+	fu = re.sub('[^a-zA-Z0-9]', '_', s)
+	fu = re.sub('_+', '_', fu)
+	fu = fu.upper()
+	return fu
+
+# shlex.quote didn't exist until python 3.3. Prior to that it was an undocumented
+# function in pipes.
+try:
+	shell_quote = shlex.quote
+except AttributeError:
+	import pipes
+	shell_quote = pipes.quote
+
+def shell_escape(cmd):
+	"""
+	Escapes a command:
+	['ls', '-l', 'arg space'] -> ls -l 'arg space'
+	"""
+	if isinstance(cmd, str):
+		return cmd
+	return ' '.join(shell_quote(x) for x in cmd)
+
+def h_list(lst):
+	"""
+	Hashes lists of ordered data.
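+
+	For example (a sketch; any list of strings works)::
+
+		from waflib.Utils import h_list
+		sig = h_list(['gcc', '-O2', 'main.c'])  # stable digest for the command list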
+
+	Using hash(tup) for tuples would be much more efficient,
+	but Python now enforces hash randomization
+
+	:param lst: list to hash
+	:type lst: list of strings
+	:return: hash of the list
+	"""
+	return md5(repr(lst).encode()).digest()
+
+if sys.hexversion < 0x3000000:
+	def h_list_python2(lst):
+		return md5(repr(lst)).digest()
+	h_list_python2.__doc__ = h_list.__doc__
+	h_list = h_list_python2
+
+def h_fun(fun):
+	"""
+	Hashes functions
+
+	:param fun: function to hash
+	:type fun: function
+	:return: hash of the function
+	:rtype: string or bytes
+	"""
+	try:
+		return fun.code
+	except AttributeError:
+		if isinstance(fun, functools.partial):
+			code = list(fun.args)
+			# The method items() provides a sequence of tuples where the first element
+			# represents an optional argument of the partial function application
+			#
+			# The sorting outcome will be consistent because:
+			# 1. tuples are compared in order of their elements
+			# 2. optional argument names are unique
+			code.extend(sorted(fun.keywords.items()))
+			code.append(h_fun(fun.func))
+			fun.code = h_list(code)
+			return fun.code
+		try:
+			h = inspect.getsource(fun)
+		except EnvironmentError:
+			h = 'nocode'
+		try:
+			fun.code = h
+		except AttributeError:
+			pass
+		return h
+
+def h_cmd(ins):
+	"""
+	Hashes objects recursively
+
+	:param ins: input object
+	:type ins: string or list or tuple or function
+	:rtype: string or bytes
+	"""
+	# this function is not meant to be particularly fast
+	if isinstance(ins, str):
+		# a command is either a string
+		ret = ins
+	elif isinstance(ins, list) or isinstance(ins, tuple):
+		# or a list of functions/strings
+		ret = str([h_cmd(x) for x in ins])
+	else:
+		# or just a python function
+		ret = str(h_fun(ins))
+	if sys.hexversion > 0x3000000:
+		ret = ret.encode('latin-1', 'xmlcharrefreplace')
+	return ret
+
+reg_subst = re.compile(r"(\\\\)|(\$\$)|\$\{([^}]+)\}")
+def subst_vars(expr, params):
+	"""
+	Replaces ${VAR} with the value of VAR taken from a dict or a config set::
+
+		from waflib import Utils
+		s = Utils.subst_vars('${PREFIX}/bin', env)
+
+	:type expr: string
+	:param expr: String to perform substitution on
+	:param params: Dictionary or config set to look up variable values.
+	"""
+	def repl_var(m):
+		if m.group(1):
+			return '\\'
+		if m.group(2):
+			return '$'
+		try:
+			# ConfigSet instances may contain lists
+			return params.get_flat(m.group(3))
+		except AttributeError:
+			return params[m.group(3)]
+	# if you get a TypeError, it means that 'expr' is not a string...
+	# Utils.subst_vars(None, env) will not work
+	return reg_subst.sub(repl_var, expr)
+
+def destos_to_binfmt(key):
+	"""
+	Returns the binary format based on the unversioned platform name,
+	and defaults to ``elf`` if nothing is found.
+
+	:param key: platform name
+	:type key: string
+	:return: string representing the binary format
+	"""
+	if key == 'darwin':
+		return 'mac-o'
+	elif key in ('win32', 'cygwin', 'uwin', 'msys'):
+		return 'pe'
+	return 'elf'
+
+def unversioned_sys_platform():
+	"""
+	Returns the unversioned platform name.
+	Some Python platform names contain versions that depend on
+	the build environment, e.g. linux2, freebsd6, etc.
+	This returns the name without the version number. Exceptions are
+	os2 and win32, which are returned verbatim.
+
+	:rtype: string
+	:return: Unversioned platform name
+	"""
+	s = sys.platform
+	if s.startswith('java'):
+		# The real OS is hidden under the JVM.
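+		# Jython: ask the JVM for the name of the underlying operating system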
+ from java.lang import System + s = System.getProperty('os.name') + # see http://lopica.sourceforge.net/os.html for a list of possible values + if s == 'Mac OS X': + return 'darwin' + elif s.startswith('Windows '): + return 'win32' + elif s == 'OS/2': + return 'os2' + elif s == 'HP-UX': + return 'hp-ux' + elif s in ('SunOS', 'Solaris'): + return 'sunos' + else: s = s.lower() + + # powerpc == darwin for our purposes + if s == 'powerpc': + return 'darwin' + if s == 'win32' or s == 'os2': + return s + if s == 'cli' and os.name == 'nt': + # ironpython is only on windows as far as we know + return 'win32' + return re.split(r'\d+$', s)[0] + +def nada(*k, **kw): + """ + Does nothing + + :return: None + """ + pass + +class Timer(object): + """ + Simple object for timing the execution of commands. + Its string representation is the duration:: + + from waflib.Utils import Timer + timer = Timer() + a_few_operations() + s = str(timer) + """ + def __init__(self): + self.start_time = self.now() + + def __str__(self): + delta = self.now() - self.start_time + if not isinstance(delta, datetime.timedelta): + delta = datetime.timedelta(seconds=delta) + days = delta.days + hours, rem = divmod(delta.seconds, 3600) + minutes, seconds = divmod(rem, 60) + seconds += delta.microseconds * 1e-6 + result = '' + if days: + result += '%dd' % days + if days or hours: + result += '%dh' % hours + if days or hours or minutes: + result += '%dm' % minutes + return '%s%.3fs' % (result, seconds) + + def now(self): + return datetime.datetime.utcnow() + + if hasattr(time, 'perf_counter'): + def now(self): + return time.perf_counter() + +def read_la_file(path): + """ + Reads property files, used by msvc.py + + :param path: file to read + :type path: string + """ + sp = re.compile(r'^([^=]+)=\'(.*)\'$') + dc = {} + for line in readf(path).splitlines(): + try: + _, left, right, _ = sp.split(line.strip()) + dc[left] = right + except ValueError: + pass + return dc + +def run_once(fun): + """ + Decorator: let a function cache its results, use like this:: + + @run_once + def foo(k): + return 345*2343 + + .. note:: in practice this can cause memory leaks, prefer a :py:class:`waflib.Utils.lru_cache` + + :param fun: function to execute + :type fun: function + :return: the return value of the function executed + """ + cache = {} + def wrap(*k): + try: + return cache[k] + except KeyError: + ret = fun(*k) + cache[k] = ret + return ret + wrap.__cache__ = cache + wrap.__name__ = fun.__name__ + return wrap + +def get_registry_app_path(key, filename): + """ + Returns the value of a registry key for an executable + + :type key: string + :type filename: list of string + """ + if not winreg: + return None + try: + result = winreg.QueryValue(key, "Software\\Microsoft\\Windows\\CurrentVersion\\App Paths\\%s.exe" % filename[0]) + except OSError: + pass + else: + if os.path.isfile(result): + return result + +def lib64(): + """ + Guess the default ``/usr/lib`` extension for 64-bit applications + + :return: '64' or '' + :rtype: string + """ + # default settings for /usr/lib + if os.sep == '/': + if platform.architecture()[0] == '64bit': + if os.path.exists('/usr/lib64') and not os.path.exists('/usr/lib32'): + return '64' + return '' + +def loose_version(ver_str): + # private for the time being! 
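+	# splits a version string into numeric and alphabetic parts so that
+	# e.g. '1.10' compares greater than '1.9'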
+ # see #2402 + lst = re.split(r'([.]|\\d+|[a-zA-Z])', ver_str) + ver = [] + for i, val in enumerate(lst): + try: + ver.append(int(val)) + except ValueError: + if val != '.': + ver.append(val) + return ver + +def sane_path(p): + # private function for the time being! + return os.path.abspath(os.path.expanduser(p)) + +process_pool = [] +""" +List of processes started to execute sub-process commands +""" + +def get_process(): + """ + Returns a process object that can execute commands as sub-processes + + :rtype: subprocess.Popen + """ + try: + return process_pool.pop() + except IndexError: + filepath = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'processor.py' + cmd = [sys.executable, '-c', readf(filepath)] + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0, close_fds=not is_win32) + +def run_prefork_process(cmd, kwargs, cargs): + """ + Delegates process execution to a pre-forked process instance. + """ + if not kwargs.get('env'): + kwargs['env'] = dict(os.environ) + try: + obj = base64.b64encode(cPickle.dumps([cmd, kwargs, cargs])) + except (TypeError, AttributeError): + return run_regular_process(cmd, kwargs, cargs) + + proc = get_process() + if not proc: + return run_regular_process(cmd, kwargs, cargs) + + proc.stdin.write(obj) + proc.stdin.write('\n'.encode()) + proc.stdin.flush() + obj = proc.stdout.readline() + if not obj: + raise OSError('Preforked sub-process %r died' % proc.pid) + + process_pool.append(proc) + lst = cPickle.loads(base64.b64decode(obj)) + # Jython wrapper failures (bash/execvp) + assert len(lst) == 5 + ret, out, err, ex, trace = lst + if ex: + if ex == 'OSError': + raise OSError(trace) + elif ex == 'ValueError': + raise ValueError(trace) + elif ex == 'TimeoutExpired': + exc = TimeoutExpired(cmd, timeout=cargs['timeout'], output=out) + exc.stderr = err + raise exc + else: + raise Exception(trace) + return ret, out, err + +def lchown(path, user=-1, group=-1): + """ + Change the owner/group of a path, raises an OSError if the + ownership change fails. + + :param user: user to change + :type user: int or str + :param group: group to change + :type group: int or str + """ + if isinstance(user, str): + import pwd + entry = pwd.getpwnam(user) + if not entry: + raise OSError('Unknown user %r' % user) + user = entry[2] + if isinstance(group, str): + import grp + entry = grp.getgrnam(group) + if not entry: + raise OSError('Unknown group %r' % group) + group = entry[2] + return os.lchown(path, user, group) + +def run_regular_process(cmd, kwargs, cargs={}): + """ + Executes a subprocess command by using subprocess.Popen + """ + proc = subprocess.Popen(cmd, **kwargs) + if kwargs.get('stdout') or kwargs.get('stderr'): + try: + out, err = proc.communicate(**cargs) + except TimeoutExpired: + if kwargs.get('start_new_session') and hasattr(os, 'killpg'): + os.killpg(proc.pid, signal.SIGKILL) + else: + proc.kill() + out, err = proc.communicate() + exc = TimeoutExpired(proc.args, timeout=cargs['timeout'], output=out) + exc.stderr = err + raise exc + status = proc.returncode + else: + out, err = (None, None) + try: + status = proc.wait(**cargs) + except TimeoutExpired as e: + if kwargs.get('start_new_session') and hasattr(os, 'killpg'): + os.killpg(proc.pid, signal.SIGKILL) + else: + proc.kill() + proc.wait() + raise e + return status, out, err + +def run_process(cmd, kwargs, cargs={}): + """ + Executes a subprocess by using a pre-forked process when possible + or falling back to subprocess.Popen. 
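The pre-forked path is only taken when both stdout and stderr are captured.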
See :py:func:`waflib.Utils.run_prefork_process` + and :py:func:`waflib.Utils.run_regular_process` + """ + if kwargs.get('stdout') and kwargs.get('stderr'): + return run_prefork_process(cmd, kwargs, cargs) + else: + return run_regular_process(cmd, kwargs, cargs) + +def alloc_process_pool(n, force=False): + """ + Allocates an amount of processes to the default pool so its size is at least *n*. + It is useful to call this function early so that the pre-forked + processes use as little memory as possible. + + :param n: pool size + :type n: integer + :param force: if True then *n* more processes are added to the existing pool + :type force: bool + """ + # mandatory on python2, unnecessary on python >= 3.2 + global run_process, get_process, alloc_process_pool + if not force: + n = max(n - len(process_pool), 0) + try: + lst = [get_process() for x in range(n)] + except OSError: + run_process = run_regular_process + get_process = alloc_process_pool = nada + else: + for x in lst: + process_pool.append(x) + +def atexit_pool(): + for k in process_pool: + try: + os.kill(k.pid, 9) + except OSError: + pass + else: + k.wait() +# see #1889 +if (sys.hexversion<0x207000f and not is_win32) or sys.hexversion>=0x306000f: + atexit.register(atexit_pool) + +if os.environ.get('WAF_NO_PREFORK') or sys.platform == 'cli' or not sys.executable: + run_process = run_regular_process + get_process = alloc_process_pool = nada + diff --git a/third_party/waf/waflib/__init__.py b/third_party/waf/waflib/__init__.py new file mode 100644 index 0000000..079df35 --- /dev/null +++ b/third_party/waf/waflib/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2018 (ita) diff --git a/third_party/waf/waflib/ansiterm.py b/third_party/waf/waflib/ansiterm.py new file mode 100644 index 0000000..027f0ad --- /dev/null +++ b/third_party/waf/waflib/ansiterm.py @@ -0,0 +1,342 @@ +#!/usr/bin/env python +# encoding: utf-8 + +""" +Emulate a vt100 terminal in cmd.exe + +By wrapping sys.stdout / sys.stderr with Ansiterm, +the vt100 escape characters will be interpreted and +the equivalent actions will be performed with Win32 +console commands. 
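+
+A sketch of the intended use, following the wrapping described above::
+
+	import sys
+	from waflib import ansiterm
+	sys.stdout = ansiterm.AnsiTerm(sys.stdout)
+	sys.stdout.write('\x1b[32mcolored\x1b[0m\n')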
+ +""" + +import os, re, sys +from waflib import Utils + +wlock = Utils.threading.Lock() + +try: + from ctypes import Structure, windll, c_short, c_ushort, c_ulong, c_int, byref, c_wchar, POINTER, c_long +except ImportError: + + class AnsiTerm(object): + def __init__(self, stream): + self.stream = stream + try: + self.errors = self.stream.errors + except AttributeError: + pass # python 2.5 + self.encoding = self.stream.encoding + + def write(self, txt): + try: + wlock.acquire() + self.stream.write(txt) + self.stream.flush() + finally: + wlock.release() + + def fileno(self): + return self.stream.fileno() + + def flush(self): + self.stream.flush() + + def isatty(self): + return self.stream.isatty() +else: + + class COORD(Structure): + _fields_ = [("X", c_short), ("Y", c_short)] + + class SMALL_RECT(Structure): + _fields_ = [("Left", c_short), ("Top", c_short), ("Right", c_short), ("Bottom", c_short)] + + class CONSOLE_SCREEN_BUFFER_INFO(Structure): + _fields_ = [("Size", COORD), ("CursorPosition", COORD), ("Attributes", c_ushort), ("Window", SMALL_RECT), ("MaximumWindowSize", COORD)] + + class CONSOLE_CURSOR_INFO(Structure): + _fields_ = [('dwSize', c_ulong), ('bVisible', c_int)] + + try: + _type = unicode + except NameError: + _type = str + + to_int = lambda number, default: number and int(number) or default + + STD_OUTPUT_HANDLE = -11 + STD_ERROR_HANDLE = -12 + + windll.kernel32.GetStdHandle.argtypes = [c_ulong] + windll.kernel32.GetStdHandle.restype = c_ulong + windll.kernel32.GetConsoleScreenBufferInfo.argtypes = [c_ulong, POINTER(CONSOLE_SCREEN_BUFFER_INFO)] + windll.kernel32.GetConsoleScreenBufferInfo.restype = c_long + windll.kernel32.SetConsoleTextAttribute.argtypes = [c_ulong, c_ushort] + windll.kernel32.SetConsoleTextAttribute.restype = c_long + windll.kernel32.FillConsoleOutputCharacterW.argtypes = [c_ulong, c_wchar, c_ulong, POINTER(COORD), POINTER(c_ulong)] + windll.kernel32.FillConsoleOutputCharacterW.restype = c_long + windll.kernel32.FillConsoleOutputAttribute.argtypes = [c_ulong, c_ushort, c_ulong, POINTER(COORD), POINTER(c_ulong) ] + windll.kernel32.FillConsoleOutputAttribute.restype = c_long + windll.kernel32.SetConsoleCursorPosition.argtypes = [c_ulong, POINTER(COORD) ] + windll.kernel32.SetConsoleCursorPosition.restype = c_long + windll.kernel32.SetConsoleCursorInfo.argtypes = [c_ulong, POINTER(CONSOLE_CURSOR_INFO)] + windll.kernel32.SetConsoleCursorInfo.restype = c_long + + class AnsiTerm(object): + """ + emulate a vt100 terminal in cmd.exe + """ + def __init__(self, s): + self.stream = s + try: + self.errors = s.errors + except AttributeError: + pass # python2.5 + self.encoding = s.encoding + self.cursor_history = [] + + handle = (s.fileno() == 2) and STD_ERROR_HANDLE or STD_OUTPUT_HANDLE + self.hconsole = windll.kernel32.GetStdHandle(handle) + + self._sbinfo = CONSOLE_SCREEN_BUFFER_INFO() + + self._csinfo = CONSOLE_CURSOR_INFO() + windll.kernel32.GetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) + + # just to double check that the console is usable + self._orig_sbinfo = CONSOLE_SCREEN_BUFFER_INFO() + r = windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self._orig_sbinfo)) + self._isatty = r == 1 + + def screen_buffer_info(self): + """ + Updates self._sbinfo and returns it + """ + windll.kernel32.GetConsoleScreenBufferInfo(self.hconsole, byref(self._sbinfo)) + return self._sbinfo + + def clear_line(self, param): + mode = param and int(param) or 0 + sbinfo = self.screen_buffer_info() + if mode == 1: # Clear from beginning of line to cursor position 
+ line_start = COORD(0, sbinfo.CursorPosition.Y) + line_length = sbinfo.Size.X + elif mode == 2: # Clear entire line + line_start = COORD(sbinfo.CursorPosition.X, sbinfo.CursorPosition.Y) + line_length = sbinfo.Size.X - sbinfo.CursorPosition.X + else: # Clear from cursor position to end of line + line_start = sbinfo.CursorPosition + line_length = sbinfo.Size.X - sbinfo.CursorPosition.X + chars_written = c_ulong() + windll.kernel32.FillConsoleOutputCharacterW(self.hconsole, c_wchar(' '), line_length, line_start, byref(chars_written)) + windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, line_length, line_start, byref(chars_written)) + + def clear_screen(self, param): + mode = to_int(param, 0) + sbinfo = self.screen_buffer_info() + if mode == 1: # Clear from beginning of screen to cursor position + clear_start = COORD(0, 0) + clear_length = sbinfo.CursorPosition.X * sbinfo.CursorPosition.Y + elif mode == 2: # Clear entire screen and return cursor to home + clear_start = COORD(0, 0) + clear_length = sbinfo.Size.X * sbinfo.Size.Y + windll.kernel32.SetConsoleCursorPosition(self.hconsole, clear_start) + else: # Clear from cursor position to end of screen + clear_start = sbinfo.CursorPosition + clear_length = ((sbinfo.Size.X - sbinfo.CursorPosition.X) + sbinfo.Size.X * (sbinfo.Size.Y - sbinfo.CursorPosition.Y)) + chars_written = c_ulong() + windll.kernel32.FillConsoleOutputCharacterW(self.hconsole, c_wchar(' '), clear_length, clear_start, byref(chars_written)) + windll.kernel32.FillConsoleOutputAttribute(self.hconsole, sbinfo.Attributes, clear_length, clear_start, byref(chars_written)) + + def push_cursor(self, param): + sbinfo = self.screen_buffer_info() + self.cursor_history.append(sbinfo.CursorPosition) + + def pop_cursor(self, param): + if self.cursor_history: + old_pos = self.cursor_history.pop() + windll.kernel32.SetConsoleCursorPosition(self.hconsole, old_pos) + + def set_cursor(self, param): + y, sep, x = param.partition(';') + x = to_int(x, 1) - 1 + y = to_int(y, 1) - 1 + sbinfo = self.screen_buffer_info() + new_pos = COORD( + min(max(0, x), sbinfo.Size.X), + min(max(0, y), sbinfo.Size.Y) + ) + windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) + + def set_column(self, param): + x = to_int(param, 1) - 1 + sbinfo = self.screen_buffer_info() + new_pos = COORD( + min(max(0, x), sbinfo.Size.X), + sbinfo.CursorPosition.Y + ) + windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) + + def move_cursor(self, x_offset=0, y_offset=0): + sbinfo = self.screen_buffer_info() + new_pos = COORD( + min(max(0, sbinfo.CursorPosition.X + x_offset), sbinfo.Size.X), + min(max(0, sbinfo.CursorPosition.Y + y_offset), sbinfo.Size.Y) + ) + windll.kernel32.SetConsoleCursorPosition(self.hconsole, new_pos) + + def move_up(self, param): + self.move_cursor(y_offset = -to_int(param, 1)) + + def move_down(self, param): + self.move_cursor(y_offset = to_int(param, 1)) + + def move_left(self, param): + self.move_cursor(x_offset = -to_int(param, 1)) + + def move_right(self, param): + self.move_cursor(x_offset = to_int(param, 1)) + + def next_line(self, param): + sbinfo = self.screen_buffer_info() + self.move_cursor( + x_offset = -sbinfo.CursorPosition.X, + y_offset = to_int(param, 1) + ) + + def prev_line(self, param): + sbinfo = self.screen_buffer_info() + self.move_cursor( + x_offset = -sbinfo.CursorPosition.X, + y_offset = -to_int(param, 1) + ) + + def rgb2bgr(self, c): + return ((c&1) << 2) | (c&2) | ((c&4)>>2) + + def set_color(self, param): + cols = param.split(';') + 
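# e.g. the sequence ESC[1;31m arrives here as param='1;31', i.e. cols=['1', '31']
+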
sbinfo = self.screen_buffer_info() + attr = sbinfo.Attributes + for c in cols: + c = to_int(c, 0) + if 29 < c < 38: # fgcolor + attr = (attr & 0xfff0) | self.rgb2bgr(c - 30) + elif 39 < c < 48: # bgcolor + attr = (attr & 0xff0f) | (self.rgb2bgr(c - 40) << 4) + elif c == 0: # reset + attr = self._orig_sbinfo.Attributes + elif c == 1: # strong + attr |= 0x08 + elif c == 4: # blink not available -> bg intensity + attr |= 0x80 + elif c == 7: # negative + attr = (attr & 0xff88) | ((attr & 0x70) >> 4) | ((attr & 0x07) << 4) + + windll.kernel32.SetConsoleTextAttribute(self.hconsole, attr) + + def show_cursor(self,param): + self._csinfo.bVisible = 1 + windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) + + def hide_cursor(self,param): + self._csinfo.bVisible = 0 + windll.kernel32.SetConsoleCursorInfo(self.hconsole, byref(self._csinfo)) + + ansi_command_table = { + 'A': move_up, + 'B': move_down, + 'C': move_right, + 'D': move_left, + 'E': next_line, + 'F': prev_line, + 'G': set_column, + 'H': set_cursor, + 'f': set_cursor, + 'J': clear_screen, + 'K': clear_line, + 'h': show_cursor, + 'l': hide_cursor, + 'm': set_color, + 's': push_cursor, + 'u': pop_cursor, + } + # Match either the escape sequence or text not containing escape sequence + ansi_tokens = re.compile(r'(?:\x1b\[([0-9?;]*)([a-zA-Z])|([^\x1b]+))') + def write(self, text): + try: + wlock.acquire() + if self._isatty: + for param, cmd, txt in self.ansi_tokens.findall(text): + if cmd: + cmd_func = self.ansi_command_table.get(cmd) + if cmd_func: + cmd_func(self, param) + else: + self.writeconsole(txt) + else: + # no support for colors in the console, just output the text: + # eclipse or msys may be able to interpret the escape sequences + self.stream.write(text) + finally: + wlock.release() + + def writeconsole(self, txt): + chars_written = c_ulong() + writeconsole = windll.kernel32.WriteConsoleA + if isinstance(txt, _type): + writeconsole = windll.kernel32.WriteConsoleW + + # MSDN says that there is a shared buffer of 64 KB for the console + # writes. 
Attempt to not get ERROR_NOT_ENOUGH_MEMORY, see waf issue #746 + done = 0 + todo = len(txt) + chunk = 32<<10 + while todo != 0: + doing = min(chunk, todo) + buf = txt[done:done+doing] + r = writeconsole(self.hconsole, buf, doing, byref(chars_written), None) + if r == 0: + chunk >>= 1 + continue + done += doing + todo -= doing + + + def fileno(self): + return self.stream.fileno() + + def flush(self): + pass + + def isatty(self): + return self._isatty + + if sys.stdout.isatty() or sys.stderr.isatty(): + handle = sys.stdout.isatty() and STD_OUTPUT_HANDLE or STD_ERROR_HANDLE + console = windll.kernel32.GetStdHandle(handle) + sbinfo = CONSOLE_SCREEN_BUFFER_INFO() + def get_term_cols(): + windll.kernel32.GetConsoleScreenBufferInfo(console, byref(sbinfo)) + # Issue 1401 - the progress bar cannot reach the last character + return sbinfo.Size.X - 1 + +# just try and see +try: + import struct, fcntl, termios +except ImportError: + pass +else: + if (sys.stdout.isatty() or sys.stderr.isatty()) and os.environ.get('TERM', '') not in ('dumb', 'emacs'): + FD = sys.stdout.isatty() and sys.stdout.fileno() or sys.stderr.fileno() + def fun(): + return struct.unpack("HHHH", fcntl.ioctl(FD, termios.TIOCGWINSZ, struct.pack("HHHH", 0, 0, 0, 0)))[1] + try: + fun() + except Exception as e: + pass + else: + get_term_cols = fun + diff --git a/third_party/waf/waflib/extras/__init__.py b/third_party/waf/waflib/extras/__init__.py new file mode 100644 index 0000000..c8a3c34 --- /dev/null +++ b/third_party/waf/waflib/extras/__init__.py @@ -0,0 +1,3 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2005-2010 (ita) diff --git a/third_party/waf/waflib/extras/batched_cc.py b/third_party/waf/waflib/extras/batched_cc.py new file mode 100644 index 0000000..aad2872 --- /dev/null +++ b/third_party/waf/waflib/extras/batched_cc.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2015 (ita) + +""" +Instead of compiling object files one by one, c/c++ compilers are often able to compile at once: +cc -c ../file1.c ../file2.c ../file3.c + +Files are output on the directory where the compiler is called, and dependencies are more difficult +to track (do not run the command on all source files if only one file changes) +As such, we do as if the files were compiled one by one, but no command is actually run: +replace each cc/cpp Task by a TaskSlave. A new task called TaskMaster collects the +signatures from each slave and finds out the command-line to run. + +Just import this module to start using it: +def build(bld): + bld.load('batched_cc') + +Note that this is provided as an example, unity builds are recommended +for best performance results (fewer tasks and fewer jobs to execute). +See waflib/extras/unity.py. 
+""" + +from waflib import Task, Utils +from waflib.TaskGen import extension, feature, after_method +from waflib.Tools import c, cxx + +MAX_BATCH = 50 + +c_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}' +c_fun, _ = Task.compile_fun_noshell(c_str) + +cxx_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${tsk.batch_incpaths()} ${DEFINES_ST:DEFINES} -c ${SRCLST} ${CXX_TGT_F_BATCHED} ${CPPFLAGS}' +cxx_fun, _ = Task.compile_fun_noshell(cxx_str) + +count = 70000 +class batch(Task.Task): + color = 'PINK' + + after = ['c', 'cxx'] + before = ['cprogram', 'cshlib', 'cstlib', 'cxxprogram', 'cxxshlib', 'cxxstlib'] + + def uid(self): + return Utils.h_list([Task.Task.uid(self), self.generator.idx, self.generator.path.abspath(), self.generator.target]) + + def __str__(self): + return 'Batch compilation for %d slaves' % len(self.slaves) + + def __init__(self, *k, **kw): + Task.Task.__init__(self, *k, **kw) + self.slaves = [] + self.inputs = [] + self.hasrun = 0 + + global count + count += 1 + self.idx = count + + def add_slave(self, slave): + self.slaves.append(slave) + self.set_run_after(slave) + + def runnable_status(self): + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + + for t in self.slaves: + #if t.executed: + if t.hasrun != Task.SKIPPED: + return Task.RUN_ME + + return Task.SKIP_ME + + def get_cwd(self): + return self.slaves[0].outputs[0].parent + + def batch_incpaths(self): + st = self.env.CPPPATH_ST + return [st % node.abspath() for node in self.generator.includes_nodes] + + def run(self): + self.outputs = [] + + srclst = [] + slaves = [] + for t in self.slaves: + if t.hasrun != Task.SKIPPED: + slaves.append(t) + srclst.append(t.inputs[0].abspath()) + + self.env.SRCLST = srclst + + if self.slaves[0].__class__.__name__ == 'c': + ret = c_fun(self) + else: + ret = cxx_fun(self) + + if ret: + return ret + + for t in slaves: + t.old_post_run() + +def hook(cls_type): + def n_hook(self, node): + + ext = '.obj' if self.env.CC_NAME == 'msvc' else '.o' + name = node.name + k = name.rfind('.') + if k >= 0: + basename = name[:k] + ext + else: + basename = name + ext + + outdir = node.parent.get_bld().make_node('%d' % self.idx) + outdir.mkdir() + out = outdir.find_or_declare(basename) + + task = self.create_task(cls_type, node, out) + + try: + self.compiled_tasks.append(task) + except AttributeError: + self.compiled_tasks = [task] + + if not getattr(self, 'masters', None): + self.masters = {} + self.allmasters = [] + + def fix_path(tsk): + if self.env.CC_NAME == 'msvc': + tsk.env.append_unique('CXX_TGT_F_BATCHED', '/Fo%s\\' % outdir.abspath()) + + if not node.parent in self.masters: + m = self.masters[node.parent] = self.master = self.create_task('batch') + fix_path(m) + self.allmasters.append(m) + else: + m = self.masters[node.parent] + if len(m.slaves) > MAX_BATCH: + m = self.masters[node.parent] = self.master = self.create_task('batch') + fix_path(m) + self.allmasters.append(m) + m.add_slave(task) + return task + return n_hook + +extension('.c')(hook('c')) +extension('.cpp','.cc','.cxx','.C','.c++')(hook('cxx')) + +@feature('cprogram', 'cshlib', 'cstaticlib', 'cxxprogram', 'cxxshlib', 'cxxstlib') +@after_method('apply_link') +def link_after_masters(self): + if getattr(self, 'allmasters', None): + for m in self.allmasters: + self.link_task.set_run_after(m) + +# Modify the c and cxx task classes - in theory it would be best to +# create 
subclasses and to re-map the c/c++ extensions +for x in ('c', 'cxx'): + t = Task.classes[x] + def run(self): + pass + + def post_run(self): + pass + + setattr(t, 'oldrun', getattr(t, 'run', None)) + setattr(t, 'run', run) + setattr(t, 'old_post_run', t.post_run) + setattr(t, 'post_run', post_run) + diff --git a/third_party/waf/waflib/extras/biber.py b/third_party/waf/waflib/extras/biber.py new file mode 100644 index 0000000..fd9db4e --- /dev/null +++ b/third_party/waf/waflib/extras/biber.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011 (ita) + +""" +Latex processing using "biber" +""" + +import os +from waflib import Task, Logs + +from waflib.Tools import tex as texmodule + +class tex(texmodule.tex): + biber_fun, _ = Task.compile_fun('${BIBER} ${BIBERFLAGS} ${SRCFILE}',shell=False) + biber_fun.__doc__ = """ + Execute the program **biber** + """ + + def bibfile(self): + return None + + def bibunits(self): + self.env.env = {} + self.env.env.update(os.environ) + self.env.env.update({'BIBINPUTS': self.texinputs(), 'BSTINPUTS': self.texinputs()}) + self.env.SRCFILE = self.aux_nodes[0].name[:-4] + + if not self.env['PROMPT_LATEX']: + self.env.append_unique('BIBERFLAGS', '--quiet') + + path = self.aux_nodes[0].abspath()[:-4] + '.bcf' + if os.path.isfile(path): + Logs.warn('calling biber') + self.check_status('error when calling biber, check %s.blg for errors' % (self.env.SRCFILE), self.biber_fun()) + else: + super(tex, self).bibfile() + super(tex, self).bibunits() + +class latex(tex): + texfun, vars = Task.compile_fun('${LATEX} ${LATEXFLAGS} ${SRCFILE}', shell=False) +class pdflatex(tex): + texfun, vars = Task.compile_fun('${PDFLATEX} ${PDFLATEXFLAGS} ${SRCFILE}', shell=False) +class xelatex(tex): + texfun, vars = Task.compile_fun('${XELATEX} ${XELATEXFLAGS} ${SRCFILE}', shell=False) + +def configure(self): + """ + Almost the same as in tex.py, but try to detect 'biber' + """ + v = self.env + for p in ' biber tex latex pdflatex xelatex bibtex dvips dvipdf ps2pdf makeindex pdf2ps'.split(): + try: + self.find_program(p, var=p.upper()) + except self.errors.ConfigurationError: + pass + v['DVIPSFLAGS'] = '-Ppdf' + diff --git a/third_party/waf/waflib/extras/bjam.py b/third_party/waf/waflib/extras/bjam.py new file mode 100644 index 0000000..8e04d3a --- /dev/null +++ b/third_party/waf/waflib/extras/bjam.py @@ -0,0 +1,128 @@ +#! /usr/bin/env python +# per rosengren 2011 + +from os import sep, readlink +from waflib import Logs +from waflib.TaskGen import feature, after_method +from waflib.Task import Task, always_run + +def options(opt): + grp = opt.add_option_group('Bjam Options') + grp.add_option('--bjam_src', default=None, help='You can find it in <boost root>/tools/jam/src') + grp.add_option('--bjam_uname', default='linuxx86_64', help='bjam is built in <src>/bin.<uname>/bjam') + grp.add_option('--bjam_config', default=None) + grp.add_option('--bjam_toolset', default=None) + +def configure(cnf): + if not cnf.env.BJAM_SRC: + cnf.env.BJAM_SRC = cnf.options.bjam_src + if not cnf.env.BJAM_UNAME: + cnf.env.BJAM_UNAME = cnf.options.bjam_uname + try: + cnf.find_program('bjam', path_list=[ + cnf.env.BJAM_SRC + sep + 'bin.' 
+ cnf.env.BJAM_UNAME + ]) + except Exception: + cnf.env.BJAM = None + if not cnf.env.BJAM_CONFIG: + cnf.env.BJAM_CONFIG = cnf.options.bjam_config + if not cnf.env.BJAM_TOOLSET: + cnf.env.BJAM_TOOLSET = cnf.options.bjam_toolset + +@feature('bjam') +@after_method('process_rule') +def process_bjam(self): + if not self.bld.env.BJAM: + self.create_task('bjam_creator') + self.create_task('bjam_build') + self.create_task('bjam_installer') + if getattr(self, 'always', False): + always_run(bjam_creator) + always_run(bjam_build) + always_run(bjam_installer) + +class bjam_creator(Task): + ext_out = 'bjam_exe' + vars=['BJAM_SRC', 'BJAM_UNAME'] + def run(self): + env = self.env + gen = self.generator + bjam = gen.bld.root.find_dir(env.BJAM_SRC) + if not bjam: + Logs.error('Can not find bjam source') + return -1 + bjam_exe_relpath = 'bin.' + env.BJAM_UNAME + '/bjam' + bjam_exe = bjam.find_resource(bjam_exe_relpath) + if bjam_exe: + env.BJAM = bjam_exe.srcpath() + return 0 + bjam_cmd = ['./build.sh'] + Logs.debug('runner: ' + bjam.srcpath() + '> ' + str(bjam_cmd)) + result = self.exec_command(bjam_cmd, cwd=bjam.srcpath()) + if not result == 0: + Logs.error('bjam failed') + return -1 + bjam_exe = bjam.find_resource(bjam_exe_relpath) + if bjam_exe: + env.BJAM = bjam_exe.srcpath() + return 0 + Logs.error('bjam failed') + return -1 + +class bjam_build(Task): + ext_in = 'bjam_exe' + ext_out = 'install' + vars = ['BJAM_TOOLSET'] + def run(self): + env = self.env + gen = self.generator + path = gen.path + bld = gen.bld + if hasattr(gen, 'root'): + build_root = path.find_node(gen.root) + else: + build_root = path + jam = bld.srcnode.find_resource(env.BJAM_CONFIG) + if jam: + Logs.debug('bjam: Using jam configuration from ' + jam.srcpath()) + jam_rel = jam.relpath_gen(build_root) + else: + Logs.warn('No build configuration in build_config/user-config.jam. 
Using default') + jam_rel = None + bjam_exe = bld.srcnode.find_node(env.BJAM) + if not bjam_exe: + Logs.error('env.BJAM is not set') + return -1 + bjam_exe_rel = bjam_exe.relpath_gen(build_root) + cmd = ([bjam_exe_rel] + + (['--user-config=' + jam_rel] if jam_rel else []) + + ['--stagedir=' + path.get_bld().path_from(build_root)] + + ['--debug-configuration'] + + ['--with-' + lib for lib in self.generator.target] + + (['toolset=' + env.BJAM_TOOLSET] if env.BJAM_TOOLSET else []) + + ['link=' + 'shared'] + + ['variant=' + 'release'] + ) + Logs.debug('runner: ' + build_root.srcpath() + '> ' + str(cmd)) + ret = self.exec_command(cmd, cwd=build_root.srcpath()) + if ret != 0: + return ret + self.set_outputs(path.get_bld().ant_glob('lib/*') + path.get_bld().ant_glob('bin/*')) + return 0 + +class bjam_installer(Task): + ext_in = 'install' + def run(self): + gen = self.generator + path = gen.path + for idir, pat in (('${LIBDIR}', 'lib/*'), ('${BINDIR}', 'bin/*')): + files = [] + for n in path.get_bld().ant_glob(pat): + try: + t = readlink(n.srcpath()) + gen.bld.symlink_as(sep.join([idir, n.name]), t, postpone=False) + except OSError: + files.append(n) + gen.bld.install_files(idir, files, postpone=False) + return 0 + diff --git a/third_party/waf/waflib/extras/blender.py b/third_party/waf/waflib/extras/blender.py new file mode 100644 index 0000000..e5efc28 --- /dev/null +++ b/third_party/waf/waflib/extras/blender.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Michal Proszek, 2014 (poxip) + +""" +Detect the version of Blender, path +and install the extension: + + def options(opt): + opt.load('blender') + def configure(cnf): + cnf.load('blender') + def build(bld): + bld(name='io_mesh_raw', + feature='blender', + files=['file1.py', 'file2.py'] + ) +If name variable is empty, files are installed in scripts/addons, otherwise scripts/addons/name +Use ./waf configure --system to set the installation directory to system path +""" +import os +import re +from getpass import getuser + +from waflib import Utils +from waflib.TaskGen import feature +from waflib.Configure import conf + +def options(opt): + opt.add_option( + '-s', '--system', + dest='directory_system', + default=False, + action='store_true', + help='determines installation directory (default: user)' + ) + +@conf +def find_blender(ctx): + '''Return version number of blender, if not exist return None''' + blender = ctx.find_program('blender') + output = ctx.cmd_and_log(blender + ['--version']) + m = re.search(r'Blender\s*((\d+(\.|))*)', output) + if not m: + ctx.fatal('Could not retrieve blender version') + + try: + blender_version = m.group(1) + except IndexError: + ctx.fatal('Could not retrieve blender version') + + ctx.env['BLENDER_VERSION'] = blender_version + return blender + +@conf +def configure_paths(ctx): + """Setup blender paths""" + # Get the username + user = getuser() + _platform = Utils.unversioned_sys_platform() + config_path = {'user': '', 'system': ''} + if _platform.startswith('linux'): + config_path['user'] = '/home/%s/.config/blender/' % user + config_path['system'] = '/usr/share/blender/' + elif _platform == 'darwin': + # MAC OS X + config_path['user'] = \ + '/Users/%s/Library/Application Support/Blender/' % user + config_path['system'] = '/Library/Application Support/Blender/' + elif Utils.is_win32: + # Windows + appdata_path = ctx.getenv('APPDATA').replace('\\', '/') + homedrive = ctx.getenv('HOMEDRIVE').replace('\\', '/') + + config_path['user'] = '%s/Blender Foundation/Blender/' % appdata_path + 
		config_path['system'] = \
+			'%sAll Users/AppData/Roaming/Blender Foundation/Blender/' % homedrive
+	else:
+		ctx.fatal(
+			'Unsupported platform. '
+			'Available platforms: Linux, OSX, MS-Windows.'
+		)
+
+	blender_version = ctx.env['BLENDER_VERSION']
+
+	config_path['user'] += blender_version + '/'
+	config_path['system'] += blender_version + '/'
+
+	ctx.env['BLENDER_CONFIG_DIR'] = os.path.abspath(config_path['user'])
+	if ctx.options.directory_system:
+		ctx.env['BLENDER_CONFIG_DIR'] = config_path['system']
+
+	ctx.env['BLENDER_ADDONS_DIR'] = os.path.join(
+		ctx.env['BLENDER_CONFIG_DIR'], 'scripts/addons'
+	)
+	Utils.check_dir(ctx.env['BLENDER_ADDONS_DIR'])
+
+def configure(ctx):
+	ctx.find_blender()
+	ctx.configure_paths()
+
+@feature('blender_list')
+def blender(self):
+	# Two ways to install a blender extension: as a module or just .py files
+	dest_dir = os.path.join(self.env.BLENDER_ADDONS_DIR, self.get_name())
+	Utils.check_dir(dest_dir)
+	self.add_install_files(install_to=dest_dir, install_from=getattr(self, 'files', '.'))
+
diff --git a/third_party/waf/waflib/extras/boo.py b/third_party/waf/waflib/extras/boo.py
new file mode 100644
index 0000000..06623d4
--- /dev/null
+++ b/third_party/waf/waflib/extras/boo.py
@@ -0,0 +1,81 @@
+#! /usr/bin/env python
+# encoding: utf-8
+# Yannick LM 2011
+
+"""
+Support for the boo programming language, for example::
+
+	bld(features = "boo",       # necessary feature
+		source   = "src.boo",   # list of boo files
+		gen      = "world.dll", # target
+		type     = "library",   # library/exe ("-target:xyz" flag)
+		name     = "world"      # necessary if the target is referenced by 'use'
+		)
+"""
+
+from waflib import Task
+from waflib.Configure import conf
+from waflib.TaskGen import feature, after_method, before_method, extension
+
+@extension('.boo')
+def boo_hook(self, node):
+	# Nothing here yet ...
+	# TODO filter the non-boo source files in 'apply_booc' and remove this method
+	pass
+
+@feature('boo')
+@before_method('process_source')
+def apply_booc(self):
+	"""Create a booc task"""
+	src_nodes = self.to_nodes(self.source)
+	out_node = self.path.find_or_declare(self.gen)
+
+	self.boo_task = self.create_task('booc', src_nodes, [out_node])
+
+	# Set variables used by the 'booc' task
+	self.boo_task.env.OUT = '-o:%s' % out_node.abspath()
+
+	# type is "exe" by default
+	type = getattr(self, "type", "exe")
+	self.boo_task.env.BOO_TARGET_TYPE = "-target:%s" % type
+
+@feature('boo')
+@after_method('apply_booc')
+def use_boo(self):
+	"""
+	boo applications honor the **use** keyword::
+	"""
+	dep_names = self.to_list(getattr(self, 'use', []))
+	for dep_name in dep_names:
+		dep_task_gen = self.bld.get_tgen_by_name(dep_name)
+		if not dep_task_gen:
+			continue
+		dep_task_gen.post()
+		dep_task = getattr(dep_task_gen, 'boo_task', None)
+		if not dep_task:
+			# Try a cs task:
+			dep_task = getattr(dep_task_gen, 'cs_task', None)
+			if not dep_task:
+				# Try a link task:
+				dep_task = getattr(dep_task_gen, 'link_task', None)
+				if not dep_task:
+					# Abort ...
+					continue
+		self.boo_task.set_run_after(dep_task) # order
+		self.boo_task.dep_nodes.extend(dep_task.outputs) # dependency
+		self.boo_task.env.append_value('BOO_FLAGS', '-reference:%s' % dep_task.outputs[0].abspath())
+
+class booc(Task.Task):
+	"""Compiles .boo files"""
+	color = 'YELLOW'
+	run_str = '${BOOC} ${BOO_FLAGS} ${BOO_TARGET_TYPE} ${OUT} ${SRC}'
+
+@conf
+def check_booc(self):
+	self.find_program('booc', var='BOOC')
+	self.env.BOO_FLAGS = ['-nologo']
+
+def configure(self):
+	"""Check that booc is available"""
+	self.check_booc()
+
diff --git a/third_party/waf/waflib/extras/boost.py b/third_party/waf/waflib/extras/boost.py
new file mode 100644
index 0000000..93b312a
--- /dev/null
+++ b/third_party/waf/waflib/extras/boost.py
@@ -0,0 +1,526 @@
+#!/usr/bin/env python
+# encoding: utf-8
+#
+# partially based on boost.py written by Gernot Vormayr
+# written by Ruediger Sonderfeld <ruediger@c-plusplus.de>, 2008
+# modified by Bjoern Michaelsen, 2008
+# modified by Luca Fossati, 2008
+# rewritten for waf 1.5.1, Thomas Nagy, 2008
+# rewritten for waf 1.6.2, Sylvain Rouquette, 2011
+
+'''
+
+This is an extra tool, not bundled with the default waf binary.
+To add the boost tool to the waf file:
+$ ./waf-light --tools=compat15,boost
+	or, if you have waf >= 1.6.2
+$ ./waf update --files=boost
+
+When using this tool, the wscript will look like:
+
+	def options(opt):
+		opt.load('compiler_cxx boost')
+
+	def configure(conf):
+		conf.load('compiler_cxx boost')
+		conf.check_boost(lib='system filesystem')
+
+	def build(bld):
+		bld(source='main.cpp', target='app', use='BOOST')
+
+Options are generated, in order to specify the location of boost includes/libraries.
+The `check_boost` configuration function lets you specify which boost libraries to use.
+Its keyword arguments can also provide defaults for the --boost-* command-line options.
+Everything will be packaged together in a BOOST component that you can use.
+
+When using MSVC, a lot of compilation flags need to match your BOOST build configuration:
+ - you may have to add /EHsc to your CXXFLAGS or define boost::throw_exception if BOOST_NO_EXCEPTIONS is defined.
+   Errors: C4530
+ - boost libraries will try to be smart and use the (pretty but often not useful) auto-linking feature of MSVC,
+   so before calling `conf.check_boost` you might want to disable it by adding
+		conf.env.DEFINES_BOOST += ['BOOST_ALL_NO_LIB']
+   Errors:
+ - boost might also be compiled with /MT, which links the runtime statically.
+   If you have problems with redefined symbols,
+		self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB']
+		self.env['CXXFLAGS_%s' % var] += ['/MD', '/EHsc']
+Passing `--boost-linkage_autodetect` might help to ensure correct linkage in some basic cases.
+ +''' + +import sys +import re +from waflib import Utils, Logs, Errors +from waflib.Configure import conf +from waflib.TaskGen import feature, after_method + +BOOST_LIBS = ['/usr/lib', '/usr/local/lib', '/opt/local/lib', '/sw/lib', '/lib'] +BOOST_INCLUDES = ['/usr/include', '/usr/local/include', '/opt/local/include', '/sw/include'] +BOOST_VERSION_FILE = 'boost/version.hpp' +BOOST_VERSION_CODE = ''' +#include <iostream> +#include <boost/version.hpp> +int main() { std::cout << BOOST_LIB_VERSION << ":" << BOOST_VERSION << std::endl; } +''' + +BOOST_ERROR_CODE = ''' +#include <boost/system/error_code.hpp> +int main() { boost::system::error_code c; } +''' + +PTHREAD_CODE = ''' +#include <pthread.h> +static void* f(void*) { return 0; } +int main() { + pthread_t th; + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_create(&th, &attr, &f, 0); + pthread_join(th, 0); + pthread_cleanup_push(0, 0); + pthread_cleanup_pop(0); + pthread_attr_destroy(&attr); +} +''' + +BOOST_THREAD_CODE = ''' +#include <boost/thread.hpp> +int main() { boost::thread t; } +''' + +BOOST_LOG_CODE = ''' +#include <boost/log/trivial.hpp> +#include <boost/log/utility/setup/console.hpp> +#include <boost/log/utility/setup/common_attributes.hpp> +int main() { + using namespace boost::log; + add_common_attributes(); + add_console_log(std::clog, keywords::format = "%Message%"); + BOOST_LOG_TRIVIAL(debug) << "log is working" << std::endl; +} +''' + +# toolsets from {boost_dir}/tools/build/v2/tools/common.jam +PLATFORM = Utils.unversioned_sys_platform() +detect_intel = lambda env: (PLATFORM == 'win32') and 'iw' or 'il' +detect_clang = lambda env: (PLATFORM == 'darwin') and 'clang-darwin' or 'clang' +detect_mingw = lambda env: (re.search('MinGW', env.CXX[0])) and 'mgw' or 'gcc' +BOOST_TOOLSETS = { + 'borland': 'bcb', + 'clang': detect_clang, + 'como': 'como', + 'cw': 'cw', + 'darwin': 'xgcc', + 'edg': 'edg', + 'g++': detect_mingw, + 'gcc': detect_mingw, + 'icpc': detect_intel, + 'intel': detect_intel, + 'kcc': 'kcc', + 'kylix': 'bck', + 'mipspro': 'mp', + 'mingw': 'mgw', + 'msvc': 'vc', + 'qcc': 'qcc', + 'sun': 'sw', + 'sunc++': 'sw', + 'tru64cxx': 'tru', + 'vacpp': 'xlc' +} + + +def options(opt): + opt = opt.add_option_group('Boost Options') + opt.add_option('--boost-includes', type='string', + default='', dest='boost_includes', + help='''path to the directory where the boost includes are, + e.g., /path/to/boost_1_55_0/stage/include''') + opt.add_option('--boost-libs', type='string', + default='', dest='boost_libs', + help='''path to the directory where the boost libs are, + e.g., path/to/boost_1_55_0/stage/lib''') + opt.add_option('--boost-mt', action='store_true', + default=False, dest='boost_mt', + help='select multi-threaded libraries') + opt.add_option('--boost-abi', type='string', default='', dest='boost_abi', + help='''select libraries with tags (gd for debug, static is automatically added), + see doc Boost, Getting Started, chapter 6.1''') + opt.add_option('--boost-linkage_autodetect', action="store_true", dest='boost_linkage_autodetect', + help="auto-detect boost linkage options (don't get used to it / might break other stuff)") + opt.add_option('--boost-toolset', type='string', + default='', dest='boost_toolset', + help='force a toolset e.g. 
msvc, vc90, \ + gcc, mingw, mgw45 (default: auto)') + py_version = '%d%d' % (sys.version_info[0], sys.version_info[1]) + opt.add_option('--boost-python', type='string', + default=py_version, dest='boost_python', + help='select the lib python with this version \ + (default: %s)' % py_version) + + +@conf +def __boost_get_version_file(self, d): + if not d: + return None + dnode = self.root.find_dir(d) + if dnode: + return dnode.find_node(BOOST_VERSION_FILE) + return None + +@conf +def boost_get_version(self, d): + """silently retrieve the boost version number""" + node = self.__boost_get_version_file(d) + if node: + try: + txt = node.read() + except EnvironmentError: + Logs.error("Could not read the file %r", node.abspath()) + else: + re_but1 = re.compile('^#define\\s+BOOST_LIB_VERSION\\s+"(.+)"', re.M) + m1 = re_but1.search(txt) + re_but2 = re.compile('^#define\\s+BOOST_VERSION\\s+(\\d+)', re.M) + m2 = re_but2.search(txt) + if m1 and m2: + return (m1.group(1), m2.group(1)) + return self.check_cxx(fragment=BOOST_VERSION_CODE, includes=[d], execute=True, define_ret=True).split(":") + +@conf +def boost_get_includes(self, *k, **kw): + includes = k and k[0] or kw.get('includes') + if includes and self.__boost_get_version_file(includes): + return includes + for d in self.environ.get('INCLUDE', '').split(';') + BOOST_INCLUDES: + if self.__boost_get_version_file(d): + return d + if includes: + self.end_msg('headers not found in %s' % includes) + self.fatal('The configuration failed') + else: + self.end_msg('headers not found, please provide a --boost-includes argument (see help)') + self.fatal('The configuration failed') + + +@conf +def boost_get_toolset(self, cc): + toolset = cc + if not cc: + build_platform = Utils.unversioned_sys_platform() + if build_platform in BOOST_TOOLSETS: + cc = build_platform + else: + cc = self.env.CXX_NAME + if cc in BOOST_TOOLSETS: + toolset = BOOST_TOOLSETS[cc] + return isinstance(toolset, str) and toolset or toolset(self.env) + + +@conf +def __boost_get_libs_path(self, *k, **kw): + ''' return the lib path and all the files in it ''' + if 'files' in kw: + return self.root.find_dir('.'), Utils.to_list(kw['files']) + libs = k and k[0] or kw.get('libs') + if libs: + path = self.root.find_dir(libs) + files = path.ant_glob('*boost_*') + if not libs or not files: + for d in self.environ.get('LIB', '').split(';') + BOOST_LIBS: + if not d: + continue + path = self.root.find_dir(d) + if path: + files = path.ant_glob('*boost_*') + if files: + break + path = self.root.find_dir(d + '64') + if path: + files = path.ant_glob('*boost_*') + if files: + break + if not path: + if libs: + self.end_msg('libs not found in %s' % libs) + self.fatal('The configuration failed') + else: + self.end_msg('libs not found, please provide a --boost-libs argument (see help)') + self.fatal('The configuration failed') + + self.to_log('Found the boost path in %r with the libraries:' % path) + for x in files: + self.to_log(' %r' % x) + return path, files + +@conf +def boost_get_libs(self, *k, **kw): + ''' + return the lib path and the required libs + according to the parameters + ''' + path, files = self.__boost_get_libs_path(**kw) + files = sorted(files, key=lambda f: (len(f.name), f.name), reverse=True) + toolset = self.boost_get_toolset(kw.get('toolset', '')) + toolset_pat = '(-%s[0-9]{0,3})' % toolset + version = '-%s' % self.env.BOOST_VERSION + + def find_lib(re_lib, files): + for file in files: + if re_lib.search(file.name): + self.to_log('Found boost lib %s' % file) + return file + return None + + 
# extensions from Tools.ccroot.lib_patterns + wo_ext = re.compile(r"\.(a|so|lib|dll|dylib)(\.[0-9\.]+)?$") + def format_lib_name(name): + if name.startswith('lib') and self.env.CC_NAME != 'msvc': + name = name[3:] + return wo_ext.sub("", name) + + def match_libs(lib_names, is_static): + libs = [] + lib_names = Utils.to_list(lib_names) + if not lib_names: + return libs + t = [] + if kw.get('mt', False): + t.append('-mt') + if kw.get('abi'): + t.append('%s%s' % (is_static and '-s' or '-', kw['abi'])) + elif is_static: + t.append('-s') + tags_pat = t and ''.join(t) or '' + ext = is_static and self.env.cxxstlib_PATTERN or self.env.cxxshlib_PATTERN + ext = ext.partition('%s')[2] # remove '%s' or 'lib%s' from PATTERN + + for lib in lib_names: + if lib == 'python': + # for instance, with python='27', + # accepts '-py27', '-py2', '27', '-2.7' and '2' + # but will reject '-py3', '-py26', '26' and '3' + tags = '({0})?((-py{2})|(-py{1}(?=[^0-9]))|({2})|(-{1}.{3})|({1}(?=[^0-9]))|(?=[^0-9])(?!-py))'.format(tags_pat, kw['python'][0], kw['python'], kw['python'][1]) + else: + tags = tags_pat + # Trying libraries, from most strict match to least one + for pattern in ['boost_%s%s%s%s%s$' % (lib, toolset_pat, tags, version, ext), + 'boost_%s%s%s%s$' % (lib, tags, version, ext), + # Give up trying to find the right version + 'boost_%s%s%s%s$' % (lib, toolset_pat, tags, ext), + 'boost_%s%s%s$' % (lib, tags, ext), + 'boost_%s%s$' % (lib, ext), + 'boost_%s' % lib]: + self.to_log('Trying pattern %s' % pattern) + file = find_lib(re.compile(pattern), files) + if file: + libs.append(format_lib_name(file.name)) + break + else: + self.end_msg('lib %s not found in %s' % (lib, path.abspath())) + self.fatal('The configuration failed') + return libs + + return path.abspath(), match_libs(kw.get('lib'), False), match_libs(kw.get('stlib'), True) + +@conf +def _check_pthread_flag(self, *k, **kw): + ''' + Computes which flags should be added to CXXFLAGS and LINKFLAGS to compile in multi-threading mode + + Yes, we *need* to put the -pthread thing in CPPFLAGS because with GCC3, + boost/thread.hpp will trigger a #error if -pthread isn't used: + boost/config/requires_threads.hpp:47:5: #error "Compiler threading support + is not turned on. Please set the correct command line options for + threading: -pthread (Linux), -pthreads (Solaris) or -mthreads (Mingw32)" + + Based on _BOOST_PTHREAD_FLAG(): https://github.com/tsuna/boost.m4/blob/master/build-aux/boost.m4 + ''' + + var = kw.get('uselib_store', 'BOOST') + + self.start_msg('Checking the flags needed to use pthreads') + + # The ordering *is* (sometimes) important. Some notes on the + # individual items follow: + # (none): in case threads are in libc; should be tried before -Kthread and + # other compiler flags to prevent continual compiler warnings + # -lpthreads: AIX (must check this before -lpthread) + # -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) + # -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) + # -llthread: LinuxThreads port on FreeBSD (also preferred to -pthread) + # -pthread: GNU Linux/GCC (kernel threads), BSD/GCC (userland threads) + # -pthreads: Solaris/GCC + # -mthreads: MinGW32/GCC, Lynx/GCC + # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it + # doesn't hurt to check since this sometimes defines pthreads too; + # also defines -D_REENTRANT) + # ... -mt is also the pthreads flag for HP/aCC + # -lpthread: GNU Linux, etc. 
+ # --thread-safe: KAI C++ + if Utils.unversioned_sys_platform() == "sunos": + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + boost_pthread_flags = ["-pthreads", "-lpthread", "-mt", "-pthread"] + else: + boost_pthread_flags = ["", "-lpthreads", "-Kthread", "-kthread", "-llthread", "-pthread", + "-pthreads", "-mthreads", "-lpthread", "--thread-safe", "-mt"] + + for boost_pthread_flag in boost_pthread_flags: + try: + self.env.stash() + self.env.append_value('CXXFLAGS_%s' % var, boost_pthread_flag) + self.env.append_value('LINKFLAGS_%s' % var, boost_pthread_flag) + self.check_cxx(code=PTHREAD_CODE, msg=None, use=var, execute=False) + + self.end_msg(boost_pthread_flag) + return + except self.errors.ConfigurationError: + self.env.revert() + self.end_msg('None') + +@conf +def check_boost(self, *k, **kw): + """ + Initialize boost libraries to be used. + + Keywords: you can pass the same parameters as with the command line (without "--boost-"). + Note that the command line has the priority, and should preferably be used. + """ + if not self.env['CXX']: + self.fatal('load a c++ compiler first, conf.load("compiler_cxx")') + + params = { + 'lib': k and k[0] or kw.get('lib'), + 'stlib': kw.get('stlib') + } + for key, value in self.options.__dict__.items(): + if not key.startswith('boost_'): + continue + key = key[len('boost_'):] + params[key] = value and value or kw.get(key, '') + + var = kw.get('uselib_store', 'BOOST') + + self.find_program('dpkg-architecture', var='DPKG_ARCHITECTURE', mandatory=False) + if self.env.DPKG_ARCHITECTURE: + deb_host_multiarch = self.cmd_and_log([self.env.DPKG_ARCHITECTURE[0], '-qDEB_HOST_MULTIARCH']) + BOOST_LIBS.insert(0, '/usr/lib/%s' % deb_host_multiarch.strip()) + + self.start_msg('Checking boost includes') + self.env['INCLUDES_%s' % var] = inc = self.boost_get_includes(**params) + versions = self.boost_get_version(inc) + self.env.BOOST_VERSION = versions[0] + self.env.BOOST_VERSION_NUMBER = int(versions[1]) + self.end_msg("%d.%d.%d" % (int(versions[1]) / 100000, + int(versions[1]) / 100 % 1000, + int(versions[1]) % 100)) + if Logs.verbose: + Logs.pprint('CYAN', ' path : %s' % self.env['INCLUDES_%s' % var]) + + if not params['lib'] and not params['stlib']: + return + if 'static' in kw or 'static' in params: + Logs.warn('boost: static parameter is deprecated, use stlib instead.') + self.start_msg('Checking boost libs') + path, libs, stlibs = self.boost_get_libs(**params) + self.env['LIBPATH_%s' % var] = [path] + self.env['STLIBPATH_%s' % var] = [path] + self.env['LIB_%s' % var] = libs + self.env['STLIB_%s' % var] = stlibs + self.end_msg('ok') + if Logs.verbose: + Logs.pprint('CYAN', ' path : %s' % path) + Logs.pprint('CYAN', ' shared libs : %s' % libs) + Logs.pprint('CYAN', ' static libs : %s' % stlibs) + + def has_shlib(lib): + return params['lib'] and lib in params['lib'] + def has_stlib(lib): + return params['stlib'] and lib in params['stlib'] + def has_lib(lib): + return has_shlib(lib) or has_stlib(lib) + if has_lib('thread'): + # not inside try_link to make check visible in the output + self._check_pthread_flag(k, kw) + + def try_link(): + if 
has_lib('system'): + self.check_cxx(fragment=BOOST_ERROR_CODE, use=var, execute=False) + if has_lib('thread'): + self.check_cxx(fragment=BOOST_THREAD_CODE, use=var, execute=False) + if has_lib('log'): + if not has_lib('thread'): + self.env['DEFINES_%s' % var] += ['BOOST_LOG_NO_THREADS'] + if has_shlib('log'): + self.env['DEFINES_%s' % var] += ['BOOST_LOG_DYN_LINK'] + self.check_cxx(fragment=BOOST_LOG_CODE, use=var, execute=False) + + if params.get('linkage_autodetect', False): + self.start_msg("Attempting to detect boost linkage flags") + toolset = self.boost_get_toolset(kw.get('toolset', '')) + if toolset in ('vc',): + # disable auto-linking feature, causing error LNK1181 + # because the code wants to be linked against + self.env['DEFINES_%s' % var] += ['BOOST_ALL_NO_LIB'] + + # if no dlls are present, we guess the .lib files are not stubs + has_dlls = False + for x in Utils.listdir(path): + if x.endswith(self.env.cxxshlib_PATTERN % ''): + has_dlls = True + break + if not has_dlls: + self.env['STLIBPATH_%s' % var] = [path] + self.env['STLIB_%s' % var] = libs + del self.env['LIB_%s' % var] + del self.env['LIBPATH_%s' % var] + + # we attempt to play with some known-to-work CXXFLAGS combinations + for cxxflags in (['/MD', '/EHsc'], []): + self.env.stash() + self.env["CXXFLAGS_%s" % var] += cxxflags + try: + try_link() + except Errors.ConfigurationError as e: + self.env.revert() + exc = e + else: + self.end_msg("ok: winning cxxflags combination: %s" % (self.env["CXXFLAGS_%s" % var])) + exc = None + self.env.commit() + break + + if exc is not None: + self.end_msg("Could not auto-detect boost linking flags combination, you may report it to boost.py author", ex=exc) + self.fatal('The configuration failed') + else: + self.end_msg("Boost linkage flags auto-detection not implemented (needed ?) for this toolchain") + self.fatal('The configuration failed') + else: + self.start_msg('Checking for boost linkage') + try: + try_link() + except Errors.ConfigurationError as e: + self.end_msg("Could not link against boost libraries using supplied options") + self.fatal('The configuration failed') + self.end_msg('ok') + + +@feature('cxx') +@after_method('apply_link') +def install_boost(self): + if install_boost.done or not Utils.is_win32 or not self.bld.cmd.startswith('install'): + return + install_boost.done = True + inst_to = getattr(self, 'install_path', '${BINDIR}') + for lib in self.env.LIB_BOOST: + try: + file = self.bld.find_file(self.env.cxxshlib_PATTERN % lib, self.env.LIBPATH_BOOST) + self.add_install_files(install_to=inst_to, install_from=self.bld.root.find_node(file)) + except: + continue +install_boost.done = False diff --git a/third_party/waf/waflib/extras/build_file_tracker.py b/third_party/waf/waflib/extras/build_file_tracker.py new file mode 100644 index 0000000..c4f26fd --- /dev/null +++ b/third_party/waf/waflib/extras/build_file_tracker.py @@ -0,0 +1,28 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2015 + +""" +Force files to depend on the timestamps of those located in the build directory. You may +want to use this to force partial rebuilds, see playground/track_output_files/ for a working example. 
+
+Note that there is a variety of ways to implement this; one may want to use timestamps on source files too, for example,
+or one may want to hash the files in the source directory only under certain conditions (md5_tstamp tool),
+or to hash the file in the build directory together with its timestamp.
+"""
+
+import os
+from waflib import Node, Utils
+
+def get_bld_sig(self):
+	if not self.is_bld() or self.ctx.bldnode is self.ctx.srcnode:
+		return Utils.h_file(self.abspath())
+
+	try:
+		# append the file modification time to the signature
+		return self.sig + str(os.stat(self.abspath()).st_mtime)
+	except AttributeError:
+		return None
+
+Node.Node.get_bld_sig = get_bld_sig
+
diff --git a/third_party/waf/waflib/extras/build_logs.py b/third_party/waf/waflib/extras/build_logs.py
new file mode 100644
index 0000000..cdf8ed0
--- /dev/null
+++ b/third_party/waf/waflib/extras/build_logs.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Thomas Nagy, 2013 (ita)
+
+"""
+A system for recording all outputs to a log file. Just add the following to your wscript file::
+
+	def init(ctx):
+		ctx.load('build_logs')
+"""
+
+import atexit, sys, time, os, shutil, threading
+from waflib import ansiterm, Logs, Context
+
+# adding the logs under the build/ directory will clash with the clean/ command
+try:
+	up = os.path.dirname(Context.g_module.__file__)
+except AttributeError:
+	up = '.'
+LOGFILE = os.path.join(up, 'logs', time.strftime('%Y_%m_%d_%H_%M.log'))
+
+wlock = threading.Lock()
+class log_to_file(object):
+	def __init__(self, stream, fileobj, filename):
+		self.stream = stream
+		self.encoding = self.stream.encoding
+		self.fileobj = fileobj
+		self.filename = filename
+		self.is_valid = True
+	def replace_colors(self, data):
+		for x in Logs.colors_lst.values():
+			if isinstance(x, str):
+				data = data.replace(x, '')
+		return data
+	def write(self, data):
+		try:
+			wlock.acquire()
+			self.stream.write(data)
+			self.stream.flush()
+			if self.is_valid:
+				self.fileobj.write(self.replace_colors(data))
+		finally:
+			wlock.release()
+	def fileno(self):
+		return self.stream.fileno()
+	def flush(self):
+		self.stream.flush()
+		if self.is_valid:
+			self.fileobj.flush()
+	def isatty(self):
+		return self.stream.isatty()
+
+def init(ctx):
+	global LOGFILE
+	filename = os.path.abspath(LOGFILE)
+	try:
+		os.makedirs(os.path.dirname(os.path.abspath(filename)))
+	except OSError:
+		pass
+
+	if hasattr(os, 'O_NOINHERIT'):
+		fd = os.open(LOGFILE, os.O_CREAT | os.O_TRUNC | os.O_WRONLY | os.O_NOINHERIT)
+		fileobj = os.fdopen(fd, 'w')
+	else:
+		fileobj = open(LOGFILE, 'w')
+	old_stderr = sys.stderr
+
+	# sys.stdout has already been replaced, so __stdout__ will be faster
+	#sys.stdout = log_to_file(sys.stdout, fileobj, filename)
+	#sys.stderr = log_to_file(sys.stderr, fileobj, filename)
+	def wrap(stream):
+		if stream.isatty():
+			return ansiterm.AnsiTerm(stream)
+		return stream
+	sys.stdout = log_to_file(wrap(sys.__stdout__), fileobj, filename)
+	sys.stderr = log_to_file(wrap(sys.__stderr__), fileobj, filename)
+
+	# now mess with the logging module...
+ for x in Logs.log.handlers: + try: + stream = x.stream + except AttributeError: + pass + else: + if id(stream) == id(old_stderr): + x.stream = sys.stderr + +def exit_cleanup(): + try: + fileobj = sys.stdout.fileobj + except AttributeError: + pass + else: + sys.stdout.is_valid = False + sys.stderr.is_valid = False + fileobj.close() + filename = sys.stdout.filename + + Logs.info('Output logged to %r', filename) + + # then copy the log file to "latest.log" if possible + up = os.path.dirname(os.path.abspath(filename)) + try: + shutil.copy(filename, os.path.join(up, 'latest.log')) + except OSError: + # this may fail on windows due to processes spawned + pass + +atexit.register(exit_cleanup) + diff --git a/third_party/waf/waflib/extras/buildcopy.py b/third_party/waf/waflib/extras/buildcopy.py new file mode 100644 index 0000000..eaff7e6 --- /dev/null +++ b/third_party/waf/waflib/extras/buildcopy.py @@ -0,0 +1,85 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Calle Rosenquist, 2017 (xbreak) +""" +Create task that copies source files to the associated build node. +This is useful to e.g. construct a complete Python package so it can be unit tested +without installation. + +Source files to be copied can be specified either in `buildcopy_source` attribute, or +`source` attribute. If both are specified `buildcopy_source` has priority. + +Examples:: + + def build(bld): + bld(name = 'bar', + features = 'py buildcopy', + source = bld.path.ant_glob('src/bar/*.py')) + + bld(name = 'py baz', + features = 'buildcopy', + buildcopy_source = bld.path.ant_glob('src/bar/*.py') + ['src/bar/resource.txt']) + +""" +import os, shutil +from waflib import Errors, Task, TaskGen, Utils, Node, Logs + +@TaskGen.before_method('process_source') +@TaskGen.feature('buildcopy') +def make_buildcopy(self): + """ + Creates the buildcopy task. + """ + def to_src_nodes(lst): + """Find file nodes only in src, TaskGen.to_nodes will not work for this since it gives + preference to nodes in build. + """ + if isinstance(lst, Node.Node): + if not lst.is_src(): + raise Errors.WafError('buildcopy: node %s is not in src'%lst) + if not os.path.isfile(lst.abspath()): + raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%lst) + return lst + + if isinstance(lst, str): + lst = [x for x in Utils.split_path(lst) if x and x != '.'] + + node = self.bld.path.get_src().search_node(lst) + if node: + if not os.path.isfile(node.abspath()): + raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%node) + return node + + node = self.bld.path.get_src().find_node(lst) + if node: + if not os.path.isfile(node.abspath()): + raise Errors.WafError('buildcopy: Cannot copy directory %s (unsupported action)'%node) + return node + raise Errors.WafError('buildcopy: File not found in src: %s'%os.path.join(*lst)) + + nodes = [ to_src_nodes(n) for n in getattr(self, 'buildcopy_source', getattr(self, 'source', [])) ] + if not nodes: + Logs.warn('buildcopy: No source files provided to buildcopy in %s (set `buildcopy_source` or `source`)', + self) + return + node_pairs = [(n, n.get_bld()) for n in nodes] + self.create_task('buildcopy', [n[0] for n in node_pairs], [n[1] for n in node_pairs], node_pairs=node_pairs) + +class buildcopy(Task.Task): + """ + Copy for each pair `n` in `node_pairs`: n[0] -> n[1]. + + Attribute `node_pairs` should contain a list of tuples describing source and target: + + node_pairs = [(in, out), ...] 
+ + """ + color = 'PINK' + + def keyword(self): + return 'Copying' + + def run(self): + for f,t in self.node_pairs: + t.parent.mkdir() + shutil.copy2(f.abspath(), t.abspath()) diff --git a/third_party/waf/waflib/extras/c_bgxlc.py b/third_party/waf/waflib/extras/c_bgxlc.py new file mode 100644 index 0000000..6e3eaf7 --- /dev/null +++ b/third_party/waf/waflib/extras/c_bgxlc.py @@ -0,0 +1,32 @@ +#! /usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +""" +IBM XL Compiler for Blue Gene +""" + +from waflib.Tools import ccroot,ar +from waflib.Configure import conf + +from waflib.Tools import xlc # method xlc_common_flags +from waflib.Tools.compiler_c import c_compiler +c_compiler['linux'].append('c_bgxlc') + +@conf +def find_bgxlc(conf): + cc = conf.find_program(['bgxlc_r','bgxlc'], var='CC') + conf.get_xlc_version(cc) + conf.env.CC = cc + conf.env.CC_NAME = 'bgxlc' + +def configure(conf): + conf.find_bgxlc() + conf.find_ar() + conf.xlc_common_flags() + conf.env.LINKFLAGS_cshlib = ['-G','-Wl,-bexpfull'] + conf.env.LINKFLAGS_cprogram = [] + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/extras/c_dumbpreproc.py b/third_party/waf/waflib/extras/c_dumbpreproc.py new file mode 100644 index 0000000..1fdd5c3 --- /dev/null +++ b/third_party/waf/waflib/extras/c_dumbpreproc.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2010 (ita) + +""" +Dumb C/C++ preprocessor for finding dependencies + +It will look at all include files it can find after removing the comments, so the following +will always add the dependency on both "a.h" and "b.h":: + + #include "a.h" + #ifdef B + #include "b.h" + #endif + int main() { + return 0; + } + +To use:: + + def configure(conf): + conf.load('compiler_c') + conf.load('c_dumbpreproc') +""" + +import re +from waflib.Tools import c_preproc + +re_inc = re.compile( + '^[ \t]*(#|%:)[ \t]*(include)[ \t]*[<"](.*)[>"]\r*$', + re.IGNORECASE | re.MULTILINE) + +def lines_includes(node): + code = node.read() + if c_preproc.use_trigraphs: + for (a, b) in c_preproc.trig_def: + code = code.split(a).join(b) + code = c_preproc.re_nl.sub('', code) + code = c_preproc.re_cpp.sub(c_preproc.repl, code) + return [(m.group(2), m.group(3)) for m in re.finditer(re_inc, code)] + +parser = c_preproc.c_parser +class dumb_parser(parser): + def addlines(self, node): + if node in self.nodes[:-1]: + return + self.currentnode_stack.append(node.parent) + + # Avoid reading the same files again + try: + lines = self.parse_cache[node] + except KeyError: + lines = self.parse_cache[node] = lines_includes(node) + + self.lines = lines + [(c_preproc.POPFILE, '')] + self.lines + + def start(self, node, env): + try: + self.parse_cache = node.ctx.parse_cache + except AttributeError: + self.parse_cache = node.ctx.parse_cache = {} + + self.addlines(node) + while self.lines: + (x, y) = self.lines.pop(0) + if x == c_preproc.POPFILE: + self.currentnode_stack.pop() + continue + self.tryfind(y, env=env) + +c_preproc.c_parser = dumb_parser + diff --git a/third_party/waf/waflib/extras/c_emscripten.py b/third_party/waf/waflib/extras/c_emscripten.py new file mode 100644 index 0000000..e1ac494 --- /dev/null +++ b/third_party/waf/waflib/extras/c_emscripten.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# -*- coding: utf-8 vi:ts=4:noexpandtab + +import subprocess, shlex, sys + +from waflib.Tools import ccroot, gcc, gxx +from waflib.Configure import conf +from waflib.TaskGen import after_method, feature + +from waflib.Tools.compiler_c 
import c_compiler +from waflib.Tools.compiler_cxx import cxx_compiler + +for supported_os in ('linux', 'darwin', 'gnu', 'aix'): + c_compiler[supported_os].append('c_emscripten') + cxx_compiler[supported_os].append('c_emscripten') + + +@conf +def get_emscripten_version(conf, cc): + """ + Emscripten doesn't support processing '-' like clang/gcc + """ + + dummy = conf.cachedir.parent.make_node("waf-emscripten.c") + dummy.write("") + cmd = cc + ['-dM', '-E', '-x', 'c', dummy.abspath()] + env = conf.env.env or None + try: + p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env) + out = p.communicate()[0] + except Exception as e: + conf.fatal('Could not determine emscripten version %r: %s' % (cmd, e)) + + if not isinstance(out, str): + out = out.decode(sys.stdout.encoding or 'latin-1') + + k = {} + out = out.splitlines() + for line in out: + lst = shlex.split(line) + if len(lst)>2: + key = lst[1] + val = lst[2] + k[key] = val + + if not ('__clang__' in k and 'EMSCRIPTEN' in k): + conf.fatal('Could not determine the emscripten compiler version.') + + conf.env.DEST_OS = 'generic' + conf.env.DEST_BINFMT = 'elf' + conf.env.DEST_CPU = 'asm-js' + conf.env.CC_VERSION = (k['__clang_major__'], k['__clang_minor__'], k['__clang_patchlevel__']) + return k + +@conf +def find_emscripten(conf): + cc = conf.find_program(['emcc'], var='CC') + conf.get_emscripten_version(cc) + conf.env.CC = cc + conf.env.CC_NAME = 'emscripten' + cxx = conf.find_program(['em++'], var='CXX') + conf.env.CXX = cxx + conf.env.CXX_NAME = 'emscripten' + conf.find_program(['emar'], var='AR') + +def configure(conf): + conf.find_emscripten() + conf.find_ar() + conf.gcc_common_flags() + conf.gxx_common_flags() + conf.cc_load_tools() + conf.cc_add_flags() + conf.cxx_load_tools() + conf.cxx_add_flags() + conf.link_add_flags() + conf.env.ARFLAGS = ['rcs'] + conf.env.cshlib_PATTERN = '%s.js' + conf.env.cxxshlib_PATTERN = '%s.js' + conf.env.cstlib_PATTERN = '%s.a' + conf.env.cxxstlib_PATTERN = '%s.a' + conf.env.cprogram_PATTERN = '%s.html' + conf.env.cxxprogram_PATTERN = '%s.html' + conf.env.CXX_TGT_F = ['-c', '-o', ''] + conf.env.CC_TGT_F = ['-c', '-o', ''] + conf.env.CXXLNK_TGT_F = ['-o', ''] + conf.env.CCLNK_TGT_F = ['-o', ''] + conf.env.append_value('LINKFLAGS',['-Wl,--enable-auto-import']) diff --git a/third_party/waf/waflib/extras/c_nec.py b/third_party/waf/waflib/extras/c_nec.py new file mode 100644 index 0000000..96bfae4 --- /dev/null +++ b/third_party/waf/waflib/extras/c_nec.py @@ -0,0 +1,74 @@ +#! 
/usr/bin/env python
+# encoding: utf-8
+# harald at klimachs.de
+
+"""
+NEC SX Compiler for SX vector systems
+"""
+
+import re
+from waflib import Utils
+from waflib.Tools import ccroot,ar
+from waflib.Configure import conf
+
+from waflib.Tools import xlc # method xlc_common_flags
+from waflib.Tools.compiler_c import c_compiler
+c_compiler['linux'].append('c_nec')
+
+@conf
+def find_sxc(conf):
+	cc = conf.find_program(['sxcc'], var='CC')
+	conf.get_sxc_version(cc)
+	conf.env.CC = cc
+	conf.env.CC_NAME = 'sxcc'
+
+@conf
+def get_sxc_version(conf, fc):
+	version_re = re.compile(r"C\+\+/SX\s*Version\s*(?P<major>\d*)\.(?P<minor>\d*)", re.I).search
+	cmd = fc + ['-V']
+	p = Utils.subprocess.Popen(cmd, stdin=False, stdout=Utils.subprocess.PIPE, stderr=Utils.subprocess.PIPE, env=None)
+	out, err = p.communicate()
+
+	if out:
+		match = version_re(out)
+	else:
+		match = version_re(err)
+	if not match:
+		conf.fatal('Could not determine the NEC C compiler version.')
+	k = match.groupdict()
+	conf.env['C_VERSION'] = (k['major'], k['minor'])
+
+@conf
+def sxc_common_flags(conf):
+	v=conf.env
+	v['CC_SRC_F']=[]
+	v['CC_TGT_F']=['-c','-o']
+	if not v['LINK_CC']:
+		v['LINK_CC']=v['CC']
+	v['CCLNK_SRC_F']=[]
+	v['CCLNK_TGT_F']=['-o']
+	v['CPPPATH_ST']='-I%s'
+	v['DEFINES_ST']='-D%s'
+	v['LIB_ST']='-l%s'
+	v['LIBPATH_ST']='-L%s'
+	v['STLIB_ST']='-l%s'
+	v['STLIBPATH_ST']='-L%s'
+	v['RPATH_ST']=''
+	v['SONAME_ST']=[]
+	v['SHLIB_MARKER']=[]
+	v['STLIB_MARKER']=[]
+	v['LINKFLAGS_cprogram']=['']
+	v['cprogram_PATTERN']='%s'
+	v['CFLAGS_cshlib']=['-fPIC']
+	v['LINKFLAGS_cshlib']=['']
+	v['cshlib_PATTERN']='lib%s.so'
+	v['LINKFLAGS_cstlib']=[]
+	v['cstlib_PATTERN']='lib%s.a'
+
+def configure(conf):
+	conf.find_sxc()
+	conf.find_program('sxar', var='AR')
+	conf.sxc_common_flags()
+	conf.cc_load_tools()
+	conf.cc_add_flags()
+	conf.link_add_flags()
diff --git a/third_party/waf/waflib/extras/cabal.py b/third_party/waf/waflib/extras/cabal.py
new file mode 100644
index 0000000..e10a0d1
--- /dev/null
+++ b/third_party/waf/waflib/extras/cabal.py
@@ -0,0 +1,152 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Anton Feldmann, 2012
+# "Base for cabal"
+
+from waflib import Task, Utils
+from waflib.TaskGen import extension
+from waflib.Utils import threading
+from shutil import rmtree
+
+lock = threading.Lock()
+registering = False
+
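+# configure: locate cabal and ghc-pkg, then create (or reuse) a private
+# package database under the build directory for the registered packages.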
+def configure(self):
+ self.find_program('cabal', var='CABAL')
+ self.find_program('ghc-pkg', var='GHCPKG')
+ pkgconfd = self.bldnode.abspath() + '/package.conf.d'
+ self.env.PREFIX = self.bldnode.abspath() + '/dist'
+ self.env.PKGCONFD = pkgconfd
+ if self.root.find_node(pkgconfd + '/package.cache'):
+ self.msg('Using existing package database', pkgconfd, color='CYAN')
+ else:
+ pkgdir = self.root.find_dir(pkgconfd)
+ if pkgdir:
+ self.msg('Deleting corrupt package database', pkgdir.abspath(), color ='RED')
+ rmtree(pkgdir.abspath())
+ pkgdir = None
+
+ self.cmd_and_log(self.env.GHCPKG + ['init', pkgconfd])
+ self.msg('Created package database', pkgconfd, color = 'YELLOW' if pkgdir else 'GREEN')
+
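+# For each .cabal file, chain the tasks: configure -> build -> copy,
+# optionally followed by register -> ghc-pkg register, and finally a touch
+# task that produces a .package stamp file; dependent task generators wait
+# on that stamp through cabal_configure.scan() below.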
+@extension('.cabal')
+def process_cabal(self, node):
+ out_dir_node = self.bld.root.find_dir(self.bld.out_dir)
+ package_node = node.change_ext('.package')
+ package_node = out_dir_node.find_or_declare(package_node.name)
+ build_node = node.parent.get_bld()
+ build_path = build_node.abspath()
+ config_node = build_node.find_or_declare('setup-config')
+ inplace_node = build_node.find_or_declare('package.conf.inplace')
+
+ config_task = self.create_task('cabal_configure', node)
+ config_task.cwd = node.parent.abspath()
+ config_task.depends_on = getattr(self, 'depends_on', '')
+ config_task.build_path = build_path
+ config_task.set_outputs(config_node)
+
+ build_task = self.create_task('cabal_build', config_node)
+ build_task.cwd = node.parent.abspath()
+ build_task.build_path = build_path
+ build_task.set_outputs(inplace_node)
+
+ copy_task = self.create_task('cabal_copy', inplace_node)
+ copy_task.cwd = node.parent.abspath()
+ copy_task.depends_on = getattr(self, 'depends_on', '')
+ copy_task.build_path = build_path
+
+ last_task = copy_task
+ task_list = [config_task, build_task, copy_task]
+
+ if getattr(self, 'register', False):
+ register_task = self.create_task('cabal_register', inplace_node)
+ register_task.cwd = node.parent.abspath()
+ register_task.set_run_after(copy_task)
+ register_task.build_path = build_path
+
+ pkgreg_task = self.create_task('ghcpkg_register', inplace_node)
+ pkgreg_task.cwd = node.parent.abspath()
+ pkgreg_task.set_run_after(register_task)
+ pkgreg_task.build_path = build_path
+
+ last_task = pkgreg_task
+ task_list += [register_task, pkgreg_task]
+
+ touch_task = self.create_task('cabal_touch', inplace_node)
+ touch_task.set_run_after(last_task)
+ touch_task.set_outputs(package_node)
+ touch_task.build_path = build_path
+
+ task_list += [touch_task]
+
+ return task_list
+
+def get_all_src_deps(node):
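+ """Glob every Haskell/C/C++/protobuf source below 'node'; any change triggers a rebuild."""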
+ hs_deps = node.ant_glob('**/*.hs')
+ hsc_deps = node.ant_glob('**/*.hsc')
+ lhs_deps = node.ant_glob('**/*.lhs')
+ c_deps = node.ant_glob('**/*.c')
+ cpp_deps = node.ant_glob('**/*.cpp')
+ proto_deps = node.ant_glob('**/*.proto')
+ return sum([hs_deps, hsc_deps, lhs_deps, c_deps, cpp_deps, proto_deps], [])
+
+class Cabal(Task.Task):
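+ """Base class for cabal tasks: scan() makes them depend on all sources under the task generator's path."""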
+ def scan(self):
+ return (get_all_src_deps(self.generator.path), ())
+
+class cabal_configure(Cabal):
+ run_str = '${CABAL} configure -v0 --prefix=${PREFIX} --global --user --package-db=${PKGCONFD} --builddir=${tsk.build_path}'
+ shell = True
+
+ def scan(self):
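+ # Depend on the '.package' stamps of the task generators named in
+ # 'depends_on' rather than on source files.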
+ out_node = self.generator.bld.root.find_dir(self.generator.bld.out_dir)
+ deps = [out_node.find_or_declare(dep).change_ext('.package') for dep in Utils.to_list(self.depends_on)]
+ return (deps, ())
+
+class cabal_build(Cabal):
+ run_str = '${CABAL} build -v1 --builddir=${tsk.build_path}/'
+ shell = True
+
+class cabal_copy(Cabal):
+ run_str = '${CABAL} copy -v0 --builddir=${tsk.build_path}'
+ shell = True
+
+class cabal_register(Cabal):
+ run_str = '${CABAL} register -v0 --gen-pkg-config=${tsk.build_path}/pkg.config --builddir=${tsk.build_path}'
+ shell = True
+
+class ghcpkg_register(Cabal):
+ run_str = '${GHCPKG} update -v0 --global --user --package-conf=${PKGCONFD} ${tsk.build_path}/pkg.config'
+ shell = True
+
+ def runnable_status(self):
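+ # Serialise package registrations: while another ghcpkg_register task
+ # holds the 'registering' flag, ask the scheduler to retry later.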
+ global lock, registering
+
+ with lock:
+ val = registering
+
+ if val:
+ return Task.ASK_LATER
+
+ ret = Task.Task.runnable_status(self)
+ if ret == Task.RUN_ME:
+ with lock:
+ registering = True
+
+ return ret
+
+ def post_run(self):
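+ # Registration done: clear the flag so the next ghcpkg_register task can run.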
+ global lock, registering
+
+ with lock:
+ registering = False
+
+ return Task.Task.post_run(self)
+
+class cabal_touch(Cabal):
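+ # Update the '.package' stamp consumed by cabal_configure.scan
+ # (relies on a POSIX 'touch' being available).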
+ run_str = 'touch ${TGT}'
+
diff --git a/third_party/waf/waflib/extras/cfg_altoptions.py b/third_party/waf/waflib/extras/cfg_altoptions.py new file mode 100644 index 0000000..47b1189 --- /dev/null +++ b/third_party/waf/waflib/extras/cfg_altoptions.py @@ -0,0 +1,110 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Tool to extend c_config.check_cfg() + +__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2014" + +""" + +This tool allows to work around the absence of ``*-config`` programs +on systems, by keeping the same clean configuration syntax but inferring +values or permitting their modification via the options interface. + +Note that pkg-config can also support setting ``PKG_CONFIG_PATH``, +so you can put custom files in a folder containing new .pc files. +This tool could also be implemented by taking advantage of this fact. + +Usage:: + + def options(opt): + opt.load('c_config_alt') + opt.add_package_option('package') + + def configure(cfg): + conf.load('c_config_alt') + conf.check_cfg(...) + +Known issues: + +- Behavior with different build contexts... + +""" + +import os +import functools +from waflib import Configure, Options, Errors + +def name_to_dest(x): + return x.lower().replace('-', '_') + + +def options(opt): + def x(opt, param): + dest = name_to_dest(param) + gr = opt.get_option_group("configure options") + gr.add_option('--%s-root' % dest, + help="path containing include and lib subfolders for %s" \ + % param, + ) + + opt.add_package_option = functools.partial(x, opt) + + +check_cfg_old = getattr(Configure.ConfigurationContext, 'check_cfg') + +@Configure.conf +def check_cfg(conf, *k, **kw): + if k: + lst = k[0].split() + kw['package'] = lst[0] + kw['args'] = ' '.join(lst[1:]) + + if not 'package' in kw: + return check_cfg_old(conf, **kw) + + package = kw['package'] + + package_lo = name_to_dest(package) + package_hi = package.upper().replace('-', '_') # TODO FIXME + package_hi = kw.get('uselib_store', package_hi) + + def check_folder(path, name): + try: + assert os.path.isdir(path) + except AssertionError: + raise Errors.ConfigurationError( + "%s_%s (%s) is not a folder!" 
\ + % (package_lo, name, path)) + return path + + root = getattr(Options.options, '%s_root' % package_lo, None) + + if root is None: + return check_cfg_old(conf, **kw) + else: + def add_manual_var(k, v): + conf.start_msg('Adding for %s a manual var' % (package)) + conf.env["%s_%s" % (k, package_hi)] = v + conf.end_msg("%s = %s" % (k, v)) + + + check_folder(root, 'root') + + pkg_inc = check_folder(os.path.join(root, "include"), 'inc') + add_manual_var('INCLUDES', [pkg_inc]) + pkg_lib = check_folder(os.path.join(root, "lib"), 'libpath') + add_manual_var('LIBPATH', [pkg_lib]) + add_manual_var('LIB', [package]) + + for x in kw.get('manual_deps', []): + for k, v in sorted(conf.env.get_merged_dict().items()): + if k.endswith('_%s' % x): + k = k.replace('_%s' % x, '') + conf.start_msg('Adding for %s a manual dep' \ + %(package)) + conf.env["%s_%s" % (k, package_hi)] += v + conf.end_msg('%s += %s' % (k, v)) + + return True + diff --git a/third_party/waf/waflib/extras/clang_compilation_database.py b/third_party/waf/waflib/extras/clang_compilation_database.py new file mode 100644 index 0000000..bd29db9 --- /dev/null +++ b/third_party/waf/waflib/extras/clang_compilation_database.py @@ -0,0 +1,137 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Christoph Koke, 2013 +# Alibek Omarov, 2019 + +""" +Writes the c and cpp compile commands into build/compile_commands.json +see http://clang.llvm.org/docs/JSONCompilationDatabase.html + +Usage: + + Load this tool in `options` to be able to generate database + by request in command-line and before build: + + $ waf clangdb + + def options(opt): + opt.load('clang_compilation_database') + + Otherwise, load only in `configure` to generate it always before build. + + def configure(conf): + conf.load('compiler_cxx') + ... + conf.load('clang_compilation_database') +""" + +from waflib import Logs, TaskGen, Task, Build, Scripting + +Task.Task.keep_last_cmd = True + +class ClangDbContext(Build.BuildContext): + '''generates compile_commands.json by request''' + cmd = 'clangdb' + + def write_compilation_database(self): + """ + Write the clang compilation database as JSON + """ + database_file = self.bldnode.make_node('compile_commands.json') + Logs.info('Build commands will be stored in %s', database_file.path_from(self.path)) + try: + root = database_file.read_json() + except IOError: + root = [] + clang_db = dict((x['file'], x) for x in root) + for task in self.clang_compilation_database_tasks: + try: + cmd = task.last_cmd + except AttributeError: + continue + f_node = task.inputs[0] + filename = f_node.path_from(task.get_cwd()) + entry = { + "directory": task.get_cwd().abspath(), + "arguments": cmd, + "file": filename, + } + clang_db[filename] = entry + root = list(clang_db.values()) + database_file.write_json(root) + + def execute(self): + """ + Build dry run + """ + self.restore() + self.cur_tasks = [] + self.clang_compilation_database_tasks = [] + + if not self.all_envs: + self.load_envs() + + self.recurse([self.run_dir]) + self.pre_build() + + # we need only to generate last_cmd, so override + # exec_command temporarily + def exec_command(self, *k, **kw): + return 0 + + for g in self.groups: + for tg in g: + try: + f = tg.post + except AttributeError: + pass + else: + f() + + if isinstance(tg, Task.Task): + lst = [tg] + else: lst = tg.tasks + for tsk in lst: + if tsk.__class__.__name__ == "swig": + tsk.runnable_status() + if hasattr(tsk, 'more_tasks'): + lst.extend(tsk.more_tasks) + # Not all dynamic tasks can be processed, in some cases + # one may have to call the 
method "run()" like this: + #elif tsk.__class__.__name__ == 'src2c': + # tsk.run() + # if hasattr(tsk, 'more_tasks'): + # lst.extend(tsk.more_tasks) + + tup = tuple(y for y in [Task.classes.get(x) for x in ('c', 'cxx')] if y) + if isinstance(tsk, tup): + self.clang_compilation_database_tasks.append(tsk) + tsk.nocache = True + old_exec = tsk.exec_command + tsk.exec_command = exec_command + tsk.run() + tsk.exec_command = old_exec + + self.write_compilation_database() + +EXECUTE_PATCHED = False +def patch_execute(): + global EXECUTE_PATCHED + + if EXECUTE_PATCHED: + return + + def new_execute_build(self): + """ + Invoke clangdb command before build + """ + if self.cmd.startswith('build'): + Scripting.run_command(self.cmd.replace('build','clangdb')) + + old_execute_build(self) + + old_execute_build = getattr(Build.BuildContext, 'execute_build', None) + setattr(Build.BuildContext, 'execute_build', new_execute_build) + EXECUTE_PATCHED = True + +patch_execute() diff --git a/third_party/waf/waflib/extras/clang_cross.py b/third_party/waf/waflib/extras/clang_cross.py new file mode 100644 index 0000000..1b51e28 --- /dev/null +++ b/third_party/waf/waflib/extras/clang_cross.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Krzysztof KosiÅ„ski 2014 +# DragoonX6 2018 + +""" +Detect the Clang C compiler +This version is an attempt at supporting the -target and -sysroot flag of Clang. +""" + +from waflib.Tools import ccroot, ar, gcc +from waflib.Configure import conf +import waflib.Context +import waflib.extras.clang_cross_common + +def options(opt): + """ + Target triplet for clang:: + $ waf configure --clang-target-triple=x86_64-pc-linux-gnu + """ + cc_compiler_opts = opt.add_option_group('Configuration options') + cc_compiler_opts.add_option('--clang-target-triple', default=None, + help='Target triple for clang', + dest='clang_target_triple') + cc_compiler_opts.add_option('--clang-sysroot', default=None, + help='Sysroot for clang', + dest='clang_sysroot') + +@conf +def find_clang(conf): + """ + Finds the program clang and executes it to ensure it really is clang + """ + + import os + + cc = conf.find_program('clang', var='CC') + + if conf.options.clang_target_triple != None: + conf.env.append_value('CC', ['-target', conf.options.clang_target_triple]) + + if conf.options.clang_sysroot != None: + sysroot = str() + + if os.path.isabs(conf.options.clang_sysroot): + sysroot = conf.options.clang_sysroot + else: + sysroot = os.path.normpath(os.path.join(os.getcwd(), conf.options.clang_sysroot)) + + conf.env.append_value('CC', ['--sysroot', sysroot]) + + conf.get_cc_version(cc, clang=True) + conf.env.CC_NAME = 'clang' + +@conf +def clang_modifier_x86_64_w64_mingw32(conf): + conf.gcc_modifier_win32() + +@conf +def clang_modifier_i386_w64_mingw32(conf): + conf.gcc_modifier_win32() + +@conf +def clang_modifier_x86_64_windows_msvc(conf): + conf.clang_modifier_msvc() + + # Allow the user to override any flags if they so desire. + clang_modifier_user_func = getattr(conf, 'clang_modifier_x86_64_windows_msvc_user', None) + if clang_modifier_user_func: + clang_modifier_user_func() + +@conf +def clang_modifier_i386_windows_msvc(conf): + conf.clang_modifier_msvc() + + # Allow the user to override any flags if they so desire. 
+ clang_modifier_user_func = getattr(conf, 'clang_modifier_i386_windows_msvc_user', None) + if clang_modifier_user_func: + clang_modifier_user_func() + +def configure(conf): + conf.find_clang() + conf.find_program(['llvm-ar', 'ar'], var='AR') + conf.find_ar() + conf.gcc_common_flags() + # Allow the user to provide flags for the target platform. + conf.gcc_modifier_platform() + # And allow more fine grained control based on the compiler's triplet. + conf.clang_modifier_target_triple() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() diff --git a/third_party/waf/waflib/extras/clang_cross_common.py b/third_party/waf/waflib/extras/clang_cross_common.py new file mode 100644 index 0000000..b76a070 --- /dev/null +++ b/third_party/waf/waflib/extras/clang_cross_common.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python +# encoding: utf-8 +# DragoonX6 2018 + +""" +Common routines for cross_clang.py and cross_clangxx.py +""" + +from waflib.Configure import conf +import waflib.Context + +def normalize_target_triple(target_triple): + target_triple = target_triple[:-1] + normalized_triple = target_triple.replace('--', '-unknown-') + + if normalized_triple.startswith('-'): + normalized_triple = 'unknown' + normalized_triple + + if normalized_triple.endswith('-'): + normalized_triple += 'unknown' + + # Normalize MinGW builds to *arch*-w64-mingw32 + if normalized_triple.endswith('windows-gnu'): + normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-w64-mingw32' + + # Strip the vendor when doing msvc builds, since it's unused anyway. + if normalized_triple.endswith('windows-msvc'): + normalized_triple = normalized_triple[:normalized_triple.index('-')] + '-windows-msvc' + + return normalized_triple.replace('-', '_') + +@conf +def clang_modifier_msvc(conf): + import os + + """ + Really basic setup to use clang in msvc mode. + We actually don't really want to do a lot, even though clang is msvc compatible + in this mode, that doesn't mean we're actually using msvc. + It's probably the best to leave it to the user, we can assume msvc mode if the user + uses the clang-cl frontend, but this module only concerns itself with the gcc-like frontend. 
+ """ + v = conf.env + v.cprogram_PATTERN = '%s.exe' + + v.cshlib_PATTERN = '%s.dll' + v.implib_PATTERN = '%s.lib' + v.IMPLIB_ST = '-Wl,-IMPLIB:%s' + v.SHLIB_MARKER = [] + + v.CFLAGS_cshlib = [] + v.LINKFLAGS_cshlib = ['-Wl,-DLL'] + v.cstlib_PATTERN = '%s.lib' + v.STLIB_MARKER = [] + + del(v.AR) + conf.find_program(['llvm-lib', 'lib'], var='AR') + v.ARFLAGS = ['-nologo'] + v.AR_TGT_F = ['-out:'] + + # Default to the linker supplied with llvm instead of link.exe or ld + v.LINK_CC = v.CC + ['-fuse-ld=lld', '-nostdlib'] + v.CCLNK_TGT_F = ['-o'] + v.def_PATTERN = '-Wl,-def:%s' + + v.LINKFLAGS = [] + + v.LIB_ST = '-l%s' + v.LIBPATH_ST = '-Wl,-LIBPATH:%s' + v.STLIB_ST = '-l%s' + v.STLIBPATH_ST = '-Wl,-LIBPATH:%s' + + CFLAGS_CRT_COMMON = [ + '-Xclang', '--dependent-lib=oldnames', + '-Xclang', '-fno-rtti-data', + '-D_MT' + ] + + v.CFLAGS_CRT_MULTITHREADED = CFLAGS_CRT_COMMON + [ + '-Xclang', '-flto-visibility-public-std', + '-Xclang', '--dependent-lib=libcmt', + ] + v.CXXFLAGS_CRT_MULTITHREADED = v.CFLAGS_CRT_MULTITHREADED + + v.CFLAGS_CRT_MULTITHREADED_DBG = CFLAGS_CRT_COMMON + [ + '-D_DEBUG', + '-Xclang', '-flto-visibility-public-std', + '-Xclang', '--dependent-lib=libcmtd', + ] + v.CXXFLAGS_CRT_MULTITHREADED_DBG = v.CFLAGS_CRT_MULTITHREADED_DBG + + v.CFLAGS_CRT_MULTITHREADED_DLL = CFLAGS_CRT_COMMON + [ + '-D_DLL', + '-Xclang', '--dependent-lib=msvcrt' + ] + v.CXXFLAGS_CRT_MULTITHREADED_DLL = v.CFLAGS_CRT_MULTITHREADED_DLL + + v.CFLAGS_CRT_MULTITHREADED_DLL_DBG = CFLAGS_CRT_COMMON + [ + '-D_DLL', + '-D_DEBUG', + '-Xclang', '--dependent-lib=msvcrtd', + ] + v.CXXFLAGS_CRT_MULTITHREADED_DLL_DBG = v.CFLAGS_CRT_MULTITHREADED_DLL_DBG + +@conf +def clang_modifier_target_triple(conf, cpp=False): + compiler = conf.env.CXX if cpp else conf.env.CC + output = conf.cmd_and_log(compiler + ['-dumpmachine'], output=waflib.Context.STDOUT) + + modifier = ('clangxx' if cpp else 'clang') + '_modifier_' + clang_modifier_func = getattr(conf, modifier + normalize_target_triple(output), None) + if clang_modifier_func: + clang_modifier_func() diff --git a/third_party/waf/waflib/extras/clangxx_cross.py b/third_party/waf/waflib/extras/clangxx_cross.py new file mode 100644 index 0000000..0ad38ad --- /dev/null +++ b/third_party/waf/waflib/extras/clangxx_cross.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy 2009-2018 (ita) +# DragoonX6 2018 + +""" +Detect the Clang++ C++ compiler +This version is an attempt at supporting the -target and -sysroot flag of Clang++. 
+""" + +from waflib.Tools import ccroot, ar, gxx +from waflib.Configure import conf +import waflib.extras.clang_cross_common + +def options(opt): + """ + Target triplet for clang++:: + $ waf configure --clangxx-target-triple=x86_64-pc-linux-gnu + """ + cxx_compiler_opts = opt.add_option_group('Configuration options') + cxx_compiler_opts.add_option('--clangxx-target-triple', default=None, + help='Target triple for clang++', + dest='clangxx_target_triple') + cxx_compiler_opts.add_option('--clangxx-sysroot', default=None, + help='Sysroot for clang++', + dest='clangxx_sysroot') + +@conf +def find_clangxx(conf): + """ + Finds the program clang++, and executes it to ensure it really is clang++ + """ + + import os + + cxx = conf.find_program('clang++', var='CXX') + + if conf.options.clangxx_target_triple != None: + conf.env.append_value('CXX', ['-target', conf.options.clangxx_target_triple]) + + if conf.options.clangxx_sysroot != None: + sysroot = str() + + if os.path.isabs(conf.options.clangxx_sysroot): + sysroot = conf.options.clangxx_sysroot + else: + sysroot = os.path.normpath(os.path.join(os.getcwd(), conf.options.clangxx_sysroot)) + + conf.env.append_value('CXX', ['--sysroot', sysroot]) + + conf.get_cc_version(cxx, clang=True) + conf.env.CXX_NAME = 'clang' + +@conf +def clangxx_modifier_x86_64_w64_mingw32(conf): + conf.gcc_modifier_win32() + +@conf +def clangxx_modifier_i386_w64_mingw32(conf): + conf.gcc_modifier_win32() + +@conf +def clangxx_modifier_msvc(conf): + v = conf.env + v.cxxprogram_PATTERN = v.cprogram_PATTERN + v.cxxshlib_PATTERN = v.cshlib_PATTERN + + v.CXXFLAGS_cxxshlib = [] + v.LINKFLAGS_cxxshlib = v.LINKFLAGS_cshlib + v.cxxstlib_PATTERN = v.cstlib_PATTERN + + v.LINK_CXX = v.CXX + ['-fuse-ld=lld', '-nostdlib'] + v.CXXLNK_TGT_F = v.CCLNK_TGT_F + +@conf +def clangxx_modifier_x86_64_windows_msvc(conf): + conf.clang_modifier_msvc() + conf.clangxx_modifier_msvc() + + # Allow the user to override any flags if they so desire. + clang_modifier_user_func = getattr(conf, 'clangxx_modifier_x86_64_windows_msvc_user', None) + if clang_modifier_user_func: + clang_modifier_user_func() + +@conf +def clangxx_modifier_i386_windows_msvc(conf): + conf.clang_modifier_msvc() + conf.clangxx_modifier_msvc() + + # Allow the user to override any flags if they so desire. + clang_modifier_user_func = getattr(conf, 'clangxx_modifier_i386_windows_msvc_user', None) + if clang_modifier_user_func: + clang_modifier_user_func() + +def configure(conf): + conf.find_clangxx() + conf.find_program(['llvm-ar', 'ar'], var='AR') + conf.find_ar() + conf.gxx_common_flags() + # Allow the user to provide flags for the target platform. + conf.gxx_modifier_platform() + # And allow more fine grained control based on the compiler's triplet. 
+ conf.clang_modifier_target_triple(cpp=True) + conf.cxx_load_tools() + conf.cxx_add_flags() + conf.link_add_flags() diff --git a/third_party/waf/waflib/extras/classic_runner.py b/third_party/waf/waflib/extras/classic_runner.py new file mode 100644 index 0000000..b08c794 --- /dev/null +++ b/third_party/waf/waflib/extras/classic_runner.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2021 (ita) + +from waflib import Utils, Runner + +""" +Re-enable the classic threading system from waf 1.x + +def configure(conf): + conf.load('classic_runner') +""" + +class TaskConsumer(Utils.threading.Thread): + """ + Task consumers belong to a pool of workers + + They wait for tasks in the queue and then use ``task.process(...)`` + """ + def __init__(self, spawner): + Utils.threading.Thread.__init__(self) + """ + Obtain :py:class:`waflib.Task.TaskBase` instances from this queue. + """ + self.spawner = spawner + self.daemon = True + self.start() + + def run(self): + """ + Loop over the tasks to execute + """ + try: + self.loop() + except Exception: + pass + + def loop(self): + """ + Obtain tasks from :py:attr:`waflib.Runner.TaskConsumer.ready` and call + :py:meth:`waflib.Task.TaskBase.process`. If the object is a function, execute it. + """ + master = self.spawner.master + while 1: + if not master.stop: + try: + tsk = master.ready.get() + if tsk: + tsk.log_display(tsk.generator.bld) + master.process_task(tsk) + else: + break + finally: + master.out.put(tsk) + +class Spawner(object): + """ + Daemon thread that consumes tasks from :py:class:`waflib.Runner.Parallel` producer and + spawns a consuming thread :py:class:`waflib.Runner.Consumer` for each + :py:class:`waflib.Task.Task` instance. + """ + def __init__(self, master): + self.master = master + """:py:class:`waflib.Runner.Parallel` producer instance""" + + self.pool = [TaskConsumer(self) for i in range(master.numjobs)] + +Runner.Spawner = Spawner diff --git a/third_party/waf/waflib/extras/codelite.py b/third_party/waf/waflib/extras/codelite.py new file mode 100644 index 0000000..523302c --- /dev/null +++ b/third_party/waf/waflib/extras/codelite.py @@ -0,0 +1,875 @@ +#! /usr/bin/env python +# encoding: utf-8 +# CodeLite Project +# Christian Klein (chrikle@berlios.de) +# Created: Jan 2012 +# As templete for this file I used the msvs.py +# I hope this template will work proper + +""" +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +""" + +""" + + +To add this tool to your project: +def options(conf): + opt.load('codelite') + +It can be a good idea to add the sync_exec tool too. + +To generate solution files: +$ waf configure codelite + +To customize the outputs, provide subclasses in your wscript files: + +from waflib.extras import codelite +class vsnode_target(codelite.vsnode_target): + def get_build_command(self, props): + # likely to be required + return "waf.bat build" + def collect_source(self): + # likely to be required + ... +class codelite_bar(codelite.codelite_generator): + def init(self): + codelite.codelite_generator.init(self) + self.vsnode_target = vsnode_target + +The codelite class re-uses the same build() function for reading the targets (task generators), +you may therefore specify codelite settings on the context object: + +def build(bld): + bld.codelite_solution_name = 'foo.workspace' + bld.waf_command = 'waf.bat' + bld.projects_dir = bld.srcnode.make_node('') + bld.projects_dir.mkdir() + + +ASSUMPTIONS: +* a project can be either a directory or a target, project files are written only for targets that have source files +* each project is a vcxproj file, therefore the project uuid needs only to be a hash of the absolute path +""" + +import os, re, sys +import uuid # requires python 2.5 +from waflib.Build import BuildContext +from waflib import Utils, TaskGen, Logs, Task, Context, Node, Options + +HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' + +PROJECT_TEMPLATE = r'''<?xml version="1.0" encoding="utf-8"?> +<CodeLite_Project Name="${project.name}" InternalType="Library"> + <Plugins> + <Plugin Name="qmake"> + <![CDATA[00010001N0005Release000000000000]]> + </Plugin> + </Plugins> + <Description/> + <Dependencies/> + <VirtualDirectory Name="src"> + ${for x in project.source} + ${if (project.get_key(x)=="sourcefile")} + <File Name="${x.abspath()}"/> + ${endif} + ${endfor} + </VirtualDirectory> + <VirtualDirectory Name="include"> + ${for x in project.source} + ${if (project.get_key(x)=="headerfile")} + <File Name="${x.abspath()}"/> + ${endif} + ${endfor} + </VirtualDirectory> + <Settings Type="Dynamic Library"> + <GlobalSettings> + <Compiler Options="" C_Options=""> + <IncludePath Value="."/> + </Compiler> + <Linker Options=""> + <LibraryPath Value="."/> + </Linker> + <ResourceCompiler Options=""/> + </GlobalSettings> + <Configuration Name="Release" CompilerType="gnu gcc" ReleasegerType="GNU gdb Releaseger" Type="Dynamic Library" BuildCmpWithGlobalSettings="append" BuildLnkWithGlobalSettings="append" BuildResWithGlobalSettings="append"> + <Compiler Options="" C_Options="" Required="yes" PreCompiledHeader="" PCHInCommandLine="no" UseDifferentPCHFlags="no" PCHFlags=""> + <IncludePath Value="."/> + <IncludePath Value="."/> + </Compiler> + <Linker Options="" Required="yes"> + <LibraryPath Value=""/> + </Linker> + <ResourceCompiler Options="" Required="no"/> + <General OutputFile="${xml:project.build_properties[0].output_file}" IntermediateDirectory="" Command="" CommandArguments="" PauseExecWhenProcTerminates="yes"/> + 
<Environment EnvVarSetName="<Use Defaults>" DbgSetName="<Use Defaults>"> + <![CDATA[]]> + </Environment> + <Releaseger IsRemote="no" RemoteHostName="" RemoteHostPort="" ReleasegerPath=""> + <PostConnectCommands/> + <StartupCommands/> + </Releaseger> + <PreBuild/> + <PostBuild/> + <CustomBuild Enabled="yes"> + $b = project.build_properties[0]} + <RebuildCommand>${xml:project.get_rebuild_command(project.build_properties[0])}</RebuildCommand> + <CleanCommand>${xml:project.get_clean_command(project.build_properties[0])}</CleanCommand> + <BuildCommand>${xml:project.get_build_command(project.build_properties[0])}</BuildCommand> + <Target Name="Install">${xml:project.get_install_command(project.build_properties[0])}</Target> + <Target Name="Build and Install">${xml:project.get_build_and_install_command(project.build_properties[0])}</Target> + <Target Name="Build All">${xml:project.get_build_all_command(project.build_properties[0])}</Target> + <Target Name="Rebuild All">${xml:project.get_rebuild_all_command(project.build_properties[0])}</Target> + <Target Name="Clean All">${xml:project.get_clean_all_command(project.build_properties[0])}</Target> + <Target Name="Build and Install All">${xml:project.get_build_and_install_all_command(project.build_properties[0])}</Target> + <PreprocessFileCommand/> + <SingleFileCommand/> + <MakefileGenerationCommand/> + <ThirdPartyToolName>None</ThirdPartyToolName> + <WorkingDirectory/> + </CustomBuild> + <AdditionalRules> + <CustomPostBuild/> + <CustomPreBuild/> + </AdditionalRules> + <Completion> + <ClangCmpFlags/> + <ClangPP/> + <SearchPaths/> + </Completion> + </Configuration> + <Configuration Name="Release" CompilerType="gnu gcc" ReleasegerType="GNU gdb Releaseger" Type="" BuildCmpWithGlobalSettings="append" BuildLnkWithGlobalSettings="append" BuildResWithGlobalSettings="append"> + <Compiler Options="" C_Options="" Required="yes" PreCompiledHeader="" PCHInCommandLine="no" UseDifferentPCHFlags="no" PCHFlags=""> + <IncludePath Value="."/> + </Compiler> + <Linker Options="" Required="yes"/> + <ResourceCompiler Options="" Required="no"/> + <General OutputFile="" IntermediateDirectory="./Release" Command="" CommandArguments="" UseSeparateReleaseArgs="no" ReleaseArguments="" WorkingDirectory="$(IntermediateDirectory)" PauseExecWhenProcTerminates="yes"/> + <Environment EnvVarSetName="<Use Defaults>" DbgSetName="<Use Defaults>"> + <![CDATA[ + + + + ]]> + </Environment> + <Releaseger IsRemote="no" RemoteHostName="" RemoteHostPort="" ReleasegerPath=""> + <PostConnectCommands/> + <StartupCommands/> + </Releaseger> + <PreBuild/> + <PostBuild/> + <CustomBuild Enabled="no"> + <RebuildCommand/> + <CleanCommand/> + <BuildCommand/> + <PreprocessFileCommand/> + <SingleFileCommand/> + <MakefileGenerationCommand/> + <ThirdPartyToolName/> + <WorkingDirectory/> + </CustomBuild> + <AdditionalRules> + <CustomPostBuild/> + <CustomPreBuild/> + </AdditionalRules> + <Completion> + <ClangCmpFlags/> + <ClangPP/> + <SearchPaths/> + </Completion> + </Configuration> + </Settings> +</CodeLite_Project>''' + + + + +SOLUTION_TEMPLATE = '''<?xml version="1.0" encoding="utf-8"?> +<CodeLite_Workspace Name="${getattr(project, 'codelite_solution_name', None)[:-10]}" Database="./${getattr(project, 'codelite_solution_name', None)[:-10]}.tags"> +${for p in project.all_projects} + <Project Name = "${p.name}" Path = "${p.title}" Active="No"/> +${endfor} + <BuildMatrix> + <WorkspaceConfiguration Name="Release" Selected="yes"> +${for p in project.all_projects} + <Project Name="${p.name}" ConfigName="Release"/> 
+${endfor} + </WorkspaceConfiguration> + </BuildMatrix> +</CodeLite_Workspace>''' + + + +COMPILE_TEMPLATE = '''def f(project): + lst = [] + def xml_escape(value): + return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") + + %s + + #f = open('cmd.txt', 'w') + #f.write(str(lst)) + #f.close() + return ''.join(lst) +''' +reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<code>[^}]*?)\})", re.M) +def compile_template(line): + """ + Compile a template expression into a python function (like jsps, but way shorter) + """ + extr = [] + def repl(match): + g = match.group + if g('dollar'): + return "$" + elif g('backslash'): + return "\\" + elif g('subst'): + extr.append(g('code')) + return "<<|@|>>" + return None + + line2 = reg_act.sub(repl, line) + params = line2.split('<<|@|>>') + assert(extr) + + + indent = 0 + buf = [] + app = buf.append + + def app(txt): + buf.append(indent * '\t' + txt) + + for x in range(len(extr)): + if params[x]: + app("lst.append(%r)" % params[x]) + + f = extr[x] + if f.startswith(('if', 'for')): + app(f + ':') + indent += 1 + elif f.startswith('py:'): + app(f[3:]) + elif f.startswith(('endif', 'endfor')): + indent -= 1 + elif f.startswith(('else', 'elif')): + indent -= 1 + app(f + ':') + indent += 1 + elif f.startswith('xml:'): + app('lst.append(xml_escape(%s))' % f[4:]) + else: + #app('lst.append((%s) or "cannot find %s")' % (f, f)) + app('lst.append(%s)' % f) + + if extr: + if params[-1]: + app("lst.append(%r)" % params[-1]) + + fun = COMPILE_TEMPLATE % "\n\t".join(buf) + #print(fun) + return Task.funex(fun) + + +re_blank = re.compile('(\n|\r|\\s)*\n', re.M) +def rm_blank_lines(txt): + txt = re_blank.sub('\r\n', txt) + return txt + +BOM = '\xef\xbb\xbf' +try: + BOM = bytes(BOM, 'latin-1') # python 3 +except (TypeError, NameError): + pass + +def stealth_write(self, data, flags='wb'): + try: + unicode + except NameError: + data = data.encode('utf-8') # python 3 + else: + data = data.decode(sys.getfilesystemencoding(), 'replace') + data = data.encode('utf-8') + + if self.name.endswith('.project'): + data = BOM + data + + try: + txt = self.read(flags='rb') + if txt != data: + raise ValueError('must write') + except (IOError, ValueError): + self.write(data, flags=flags) + else: + Logs.debug('codelite: skipping %r', self) +Node.Node.stealth_write = stealth_write + +re_quote = re.compile("[^a-zA-Z0-9-]") +def quote(s): + return re_quote.sub("_", s) + +def xml_escape(value): + return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") + +def make_uuid(v, prefix = None): + """ + simple utility function + """ + if isinstance(v, dict): + keys = list(v.keys()) + keys.sort() + tmp = str([(k, v[k]) for k in keys]) + else: + tmp = str(v) + d = Utils.md5(tmp.encode()).hexdigest().upper() + if prefix: + d = '%s%s' % (prefix, d[8:]) + gid = uuid.UUID(d, version = 4) + return str(gid).upper() + +def diff(node, fromnode): + # difference between two nodes, but with "(..)" instead of ".." 
+ c1 = node + c2 = fromnode + + c1h = c1.height() + c2h = c2.height() + + lst = [] + up = 0 + + while c1h > c2h: + lst.append(c1.name) + c1 = c1.parent + c1h -= 1 + + while c2h > c1h: + up += 1 + c2 = c2.parent + c2h -= 1 + + while id(c1) != id(c2): + lst.append(c1.name) + up += 1 + + c1 = c1.parent + c2 = c2.parent + + for i in range(up): + lst.append('(..)') + lst.reverse() + return tuple(lst) + +class build_property(object): + pass + +class vsnode(object): + """ + Abstract class representing visual studio elements + We assume that all visual studio nodes have a uuid and a parent + """ + def __init__(self, ctx): + self.ctx = ctx # codelite context + self.name = '' # string, mandatory + self.vspath = '' # path in visual studio (name for dirs, absolute path for projects) + self.uuid = '' # string, mandatory + self.parent = None # parent node for visual studio nesting + + def get_waf(self): + """ + Override in subclasses... + """ + return '%s/%s' % (self.ctx.srcnode.abspath(), getattr(self.ctx, 'waf_command', 'waf')) + + def ptype(self): + """ + Return a special uuid for projects written in the solution file + """ + pass + + def write(self): + """ + Write the project file, by default, do nothing + """ + pass + + def make_uuid(self, val): + """ + Alias for creating uuid values easily (the templates cannot access global variables) + """ + return make_uuid(val) + +class vsnode_vsdir(vsnode): + """ + Nodes representing visual studio folders (which do not match the filesystem tree!) + """ + VS_GUID_SOLUTIONFOLDER = "2150E333-8FDC-42A3-9474-1A3956D46DE8" + def __init__(self, ctx, uuid, name, vspath=''): + vsnode.__init__(self, ctx) + self.title = self.name = name + self.uuid = uuid + self.vspath = vspath or name + + def ptype(self): + return self.VS_GUID_SOLUTIONFOLDER + +class vsnode_project(vsnode): + """ + Abstract class representing visual studio project elements + A project is assumed to be writable, and has a node representing the file to write to + """ + VS_GUID_VCPROJ = "8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942" + def ptype(self): + return self.VS_GUID_VCPROJ + + def __init__(self, ctx, node): + vsnode.__init__(self, ctx) + self.path = node + self.uuid = make_uuid(node.abspath()) + self.name = node.name + self.title = self.path.abspath() + self.source = [] # list of node objects + self.build_properties = [] # list of properties (nmake commands, output dir, etc) + + def dirs(self): + """ + Get the list of parent folders of the source files (header files included) + for writing the filters + """ + lst = [] + def add(x): + if x.height() > self.tg.path.height() and x not in lst: + lst.append(x) + add(x.parent) + for x in self.source: + add(x.parent) + return lst + + def write(self): + Logs.debug('codelite: creating %r', self.path) + #print "self.name:",self.name + + # first write the project file + template1 = compile_template(PROJECT_TEMPLATE) + proj_str = template1(self) + proj_str = rm_blank_lines(proj_str) + self.path.stealth_write(proj_str) + + # then write the filter + #template2 = compile_template(FILTER_TEMPLATE) + #filter_str = template2(self) + #filter_str = rm_blank_lines(filter_str) + #tmp = self.path.parent.make_node(self.path.name + '.filters') + #tmp.stealth_write(filter_str) + + def get_key(self, node): + """ + required for writing the source files + """ + name = node.name + if name.endswith(('.cpp', '.c')): + return 'sourcefile' + return 'headerfile' + + def collect_properties(self): + """ + Returns a list of triplet (configuration, platform, output_directory) + """ + ret = [] + 
for c in self.ctx.configurations: + for p in self.ctx.platforms: + x = build_property() + x.outdir = '' + + x.configuration = c + x.platform = p + + x.preprocessor_definitions = '' + x.includes_search_path = '' + + # can specify "deploy_dir" too + ret.append(x) + self.build_properties = ret + + def get_build_params(self, props): + opt = '' + return (self.get_waf(), opt) + + def get_build_command(self, props): + return "%s build %s" % self.get_build_params(props) + + def get_clean_command(self, props): + return "%s clean %s" % self.get_build_params(props) + + def get_rebuild_command(self, props): + return "%s clean build %s" % self.get_build_params(props) + + def get_install_command(self, props): + return "%s install %s" % self.get_build_params(props) + def get_build_and_install_command(self, props): + return "%s build install %s" % self.get_build_params(props) + + def get_build_and_install_all_command(self, props): + return "%s build install" % self.get_build_params(props)[0] + + def get_clean_all_command(self, props): + return "%s clean" % self.get_build_params(props)[0] + + def get_build_all_command(self, props): + return "%s build" % self.get_build_params(props)[0] + + def get_rebuild_all_command(self, props): + return "%s clean build" % self.get_build_params(props)[0] + + def get_filter_name(self, node): + lst = diff(node, self.tg.path) + return '\\'.join(lst) or '.' + +class vsnode_alias(vsnode_project): + def __init__(self, ctx, node, name): + vsnode_project.__init__(self, ctx, node) + self.name = name + self.output_file = '' + +class vsnode_build_all(vsnode_alias): + """ + Fake target used to emulate the behaviour of "make all" (starting one process by target is slow) + This is the only alias enabled by default + """ + def __init__(self, ctx, node, name='build_all_projects'): + vsnode_alias.__init__(self, ctx, node, name) + self.is_active = True + +class vsnode_install_all(vsnode_alias): + """ + Fake target used to emulate the behaviour of "make install" + """ + def __init__(self, ctx, node, name='install_all_projects'): + vsnode_alias.__init__(self, ctx, node, name) + + def get_build_command(self, props): + return "%s build install %s" % self.get_build_params(props) + + def get_clean_command(self, props): + return "%s clean %s" % self.get_build_params(props) + + def get_rebuild_command(self, props): + return "%s clean build install %s" % self.get_build_params(props) + +class vsnode_project_view(vsnode_alias): + """ + Fake target used to emulate a file system view + """ + def __init__(self, ctx, node, name='project_view'): + vsnode_alias.__init__(self, ctx, node, name) + self.tg = self.ctx() # fake one, cannot remove + self.exclude_files = Node.exclude_regs + ''' +waf-2* +waf3-2*/** +.waf-2* +.waf3-2*/** +**/*.sdf +**/*.suo +**/*.ncb +**/%s + ''' % Options.lockfile + + def collect_source(self): + # this is likely to be slow + self.source = self.ctx.srcnode.ant_glob('**', excl=self.exclude_files) + + def get_build_command(self, props): + params = self.get_build_params(props) + (self.ctx.cmd,) + return "%s %s %s" % params + + def get_clean_command(self, props): + return "" + + def get_rebuild_command(self, props): + return self.get_build_command(props) + +class vsnode_target(vsnode_project): + """ + CodeLite project representing a targets (programs, libraries, etc) and bound + to a task generator + """ + def __init__(self, ctx, tg): + """ + A project is more or less equivalent to a file/folder + """ + base = getattr(ctx, 'projects_dir', None) or tg.path + node = 
base.make_node(quote(tg.name) + ctx.project_extension) # the project file as a Node + vsnode_project.__init__(self, ctx, node) + self.name = quote(tg.name) + self.tg = tg # task generator + + def get_build_params(self, props): + """ + Override the default to add the target name + """ + opt = '' + if getattr(self, 'tg', None): + opt += " --targets=%s" % self.tg.name + return (self.get_waf(), opt) + + def collect_source(self): + tg = self.tg + source_files = tg.to_nodes(getattr(tg, 'source', [])) + include_dirs = Utils.to_list(getattr(tg, 'codelite_includes', [])) + include_files = [] + for x in include_dirs: + if isinstance(x, str): + x = tg.path.find_node(x) + if x: + lst = [y for y in x.ant_glob(HEADERS_GLOB, flat=False)] + include_files.extend(lst) + + # remove duplicates + self.source.extend(list(set(source_files + include_files))) + self.source.sort(key=lambda x: x.abspath()) + + def collect_properties(self): + """ + CodeLite projects are associated with platforms and configurations (for building especially) + """ + super(vsnode_target, self).collect_properties() + for x in self.build_properties: + x.outdir = self.path.parent.abspath() + x.preprocessor_definitions = '' + x.includes_search_path = '' + + try: + tsk = self.tg.link_task + except AttributeError: + pass + else: + x.output_file = tsk.outputs[0].abspath() + x.preprocessor_definitions = ';'.join(tsk.env.DEFINES) + x.includes_search_path = ';'.join(self.tg.env.INCPATHS) + +class codelite_generator(BuildContext): + '''generates a CodeLite workspace''' + cmd = 'codelite' + fun = 'build' + + def init(self): + """ + Some data that needs to be present + """ + if not getattr(self, 'configurations', None): + self.configurations = ['Release'] # LocalRelease, RemoteDebug, etc + if not getattr(self, 'platforms', None): + self.platforms = ['Win32'] + if not getattr(self, 'all_projects', None): + self.all_projects = [] + if not getattr(self, 'project_extension', None): + self.project_extension = '.project' + if not getattr(self, 'projects_dir', None): + self.projects_dir = self.srcnode.make_node('') + self.projects_dir.mkdir() + + # bind the classes to the object, so that subclass can provide custom generators + if not getattr(self, 'vsnode_vsdir', None): + self.vsnode_vsdir = vsnode_vsdir + if not getattr(self, 'vsnode_target', None): + self.vsnode_target = vsnode_target + if not getattr(self, 'vsnode_build_all', None): + self.vsnode_build_all = vsnode_build_all + if not getattr(self, 'vsnode_install_all', None): + self.vsnode_install_all = vsnode_install_all + if not getattr(self, 'vsnode_project_view', None): + self.vsnode_project_view = vsnode_project_view + + self.numver = '11.00' + self.vsver = '2010' + + def execute(self): + """ + Entry point + """ + self.restore() + if not self.all_envs: + self.load_envs() + self.recurse([self.run_dir]) + + # user initialization + self.init() + + # two phases for creating the solution + self.collect_projects() # add project objects into "self.all_projects" + self.write_files() # write the corresponding project and solution files + + def collect_projects(self): + """ + Fill the list self.all_projects with project objects + Fill the list of build targets + """ + self.collect_targets() + #self.add_aliases() + #self.collect_dirs() + default_project = getattr(self, 'default_project', None) + def sortfun(x): + if x.name == default_project: + return '' + return getattr(x, 'path', None) and x.path.abspath() or x.name + self.all_projects.sort(key=sortfun) + + def write_files(self): + """ + Write the project 
and solution files from the data collected + so far. It is unlikely that you will want to change this + """ + for p in self.all_projects: + p.write() + + # and finally write the solution file + node = self.get_solution_node() + node.parent.mkdir() + Logs.warn('Creating %r', node) + #a = dir(self.root) + #for b in a: + # print b + #print self.group_names + #print "Hallo2: ",self.root.listdir() + #print getattr(self, 'codelite_solution_name', None) + template1 = compile_template(SOLUTION_TEMPLATE) + sln_str = template1(self) + sln_str = rm_blank_lines(sln_str) + node.stealth_write(sln_str) + + def get_solution_node(self): + """ + The solution filename is required when writing the .vcproj files + return self.solution_node and if it does not exist, make one + """ + try: + return self.solution_node + except: + pass + + codelite_solution_name = getattr(self, 'codelite_solution_name', None) + if not codelite_solution_name: + codelite_solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.workspace' + setattr(self, 'codelite_solution_name', codelite_solution_name) + if os.path.isabs(codelite_solution_name): + self.solution_node = self.root.make_node(codelite_solution_name) + else: + self.solution_node = self.srcnode.make_node(codelite_solution_name) + return self.solution_node + + def project_configurations(self): + """ + Helper that returns all the pairs (config,platform) + """ + ret = [] + for c in self.configurations: + for p in self.platforms: + ret.append((c, p)) + return ret + + def collect_targets(self): + """ + Process the list of task generators + """ + for g in self.groups: + for tg in g: + if not isinstance(tg, TaskGen.task_gen): + continue + + if not hasattr(tg, 'codelite_includes'): + tg.codelite_includes = tg.to_list(getattr(tg, 'includes', [])) + tg.to_list(getattr(tg, 'export_includes', [])) + tg.post() + if not getattr(tg, 'link_task', None): + continue + + p = self.vsnode_target(self, tg) + p.collect_source() # delegate this processing + p.collect_properties() + self.all_projects.append(p) + + def add_aliases(self): + """ + Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7 + We also add an alias for "make install" (disabled by default) + """ + base = getattr(self, 'projects_dir', None) or self.tg.path + + node_project = base.make_node('build_all_projects' + self.project_extension) # Node + p_build = self.vsnode_build_all(self, node_project) + p_build.collect_properties() + self.all_projects.append(p_build) + + node_project = base.make_node('install_all_projects' + self.project_extension) # Node + p_install = self.vsnode_install_all(self, node_project) + p_install.collect_properties() + self.all_projects.append(p_install) + + node_project = base.make_node('project_view' + self.project_extension) # Node + p_view = self.vsnode_project_view(self, node_project) + p_view.collect_source() + p_view.collect_properties() + self.all_projects.append(p_view) + + n = self.vsnode_vsdir(self, make_uuid(self.srcnode.abspath() + 'build_aliases'), "build_aliases") + p_build.parent = p_install.parent = p_view.parent = n + self.all_projects.append(n) + + def collect_dirs(self): + """ + Create the folder structure in the CodeLite project view + """ + seen = {} + def make_parents(proj): + # look at a project, try to make a parent + if getattr(proj, 'parent', None): + # aliases already have parents + return + x = proj.iter_path + if x in seen: + proj.parent = seen[x] + return + + # There is not vsnode_vsdir for x. 
+ # So create a project representing the folder "x" + n = proj.parent = seen[x] = self.vsnode_vsdir(self, make_uuid(x.abspath()), x.name) + n.iter_path = x.parent + self.all_projects.append(n) + + # recurse up to the project directory + if x.height() > self.srcnode.height() + 1: + make_parents(n) + + for p in self.all_projects[:]: # iterate over a copy of all projects + if not getattr(p, 'tg', None): + # but only projects that have a task generator + continue + + # make a folder for each task generator + p.iter_path = p.tg.path + make_parents(p) + diff --git a/third_party/waf/waflib/extras/color_gcc.py b/third_party/waf/waflib/extras/color_gcc.py new file mode 100644 index 0000000..0972903 --- /dev/null +++ b/third_party/waf/waflib/extras/color_gcc.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python +# encoding: utf-8 + +# Replaces the default formatter by one which understands GCC output and colorizes it. + +__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2012" + +import sys +from waflib import Logs + +class ColorGCCFormatter(Logs.formatter): + def __init__(self, colors): + self.colors = colors + Logs.formatter.__init__(self) + def format(self, rec): + frame = sys._getframe() + while frame: + func = frame.f_code.co_name + if func == 'exec_command': + cmd = frame.f_locals.get('cmd') + if isinstance(cmd, list) and (len(cmd) > 0) and ('gcc' in cmd[0] or 'g++' in cmd[0]): + lines = [] + for line in rec.msg.splitlines(): + if 'warning: ' in line: + lines.append(self.colors.YELLOW + line) + elif 'error: ' in line: + lines.append(self.colors.RED + line) + elif 'note: ' in line: + lines.append(self.colors.CYAN + line) + else: + lines.append(line) + rec.msg = "\n".join(lines) + frame = frame.f_back + return Logs.formatter.format(self, rec) + +def options(opt): + Logs.log.handlers[0].setFormatter(ColorGCCFormatter(Logs.colors)) + diff --git a/third_party/waf/waflib/extras/color_msvc.py b/third_party/waf/waflib/extras/color_msvc.py new file mode 100644 index 0000000..60bacb7 --- /dev/null +++ b/third_party/waf/waflib/extras/color_msvc.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# encoding: utf-8 + +# Replaces the default formatter by one which understands MSVC output and colorizes it. 
+# Modified from color_gcc.py + +__author__ = __maintainer__ = "Alibek Omarov <a1ba.omarov@gmail.com>" +__copyright__ = "Alibek Omarov, 2019" + +import sys +from waflib import Logs + +class ColorMSVCFormatter(Logs.formatter): + def __init__(self, colors): + self.colors = colors + Logs.formatter.__init__(self) + + def parseMessage(self, line, color): + # Split messaage from 'disk:filepath: type: message' + arr = line.split(':', 3) + if len(arr) < 4: + return line + + colored = self.colors.BOLD + arr[0] + ':' + arr[1] + ':' + self.colors.NORMAL + colored += color + arr[2] + ':' + self.colors.NORMAL + colored += arr[3] + return colored + + def format(self, rec): + frame = sys._getframe() + while frame: + func = frame.f_code.co_name + if func == 'exec_command': + cmd = frame.f_locals.get('cmd') + if isinstance(cmd, list): + # Fix file case, it may be CL.EXE or cl.exe + argv0 = cmd[0].lower() + if 'cl.exe' in argv0: + lines = [] + # This will not work with "localized" versions + # of MSVC + for line in rec.msg.splitlines(): + if ': warning ' in line: + lines.append(self.parseMessage(line, self.colors.YELLOW)) + elif ': error ' in line: + lines.append(self.parseMessage(line, self.colors.RED)) + elif ': fatal error ' in line: + lines.append(self.parseMessage(line, self.colors.RED + self.colors.BOLD)) + elif ': note: ' in line: + lines.append(self.parseMessage(line, self.colors.CYAN)) + else: + lines.append(line) + rec.msg = "\n".join(lines) + frame = frame.f_back + return Logs.formatter.format(self, rec) + +def options(opt): + Logs.log.handlers[0].setFormatter(ColorMSVCFormatter(Logs.colors)) + diff --git a/third_party/waf/waflib/extras/color_rvct.py b/third_party/waf/waflib/extras/color_rvct.py new file mode 100644 index 0000000..f89ccbd --- /dev/null +++ b/third_party/waf/waflib/extras/color_rvct.py @@ -0,0 +1,51 @@ +#!/usr/bin/env python +# encoding: utf-8 + +# Replaces the default formatter by one which understands RVCT output and colorizes it. + +__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2012" + +import sys +import atexit +from waflib import Logs + +errors = [] + +def show_errors(): + for i, e in enumerate(errors): + if i > 5: + break + print("Error: %s" % e) + +atexit.register(show_errors) + +class RcvtFormatter(Logs.formatter): + def __init__(self, colors): + Logs.formatter.__init__(self) + self.colors = colors + def format(self, rec): + frame = sys._getframe() + while frame: + func = frame.f_code.co_name + if func == 'exec_command': + cmd = frame.f_locals['cmd'] + if isinstance(cmd, list) and ('armcc' in cmd[0] or 'armld' in cmd[0]): + lines = [] + for line in rec.msg.splitlines(): + if 'Warning: ' in line: + lines.append(self.colors.YELLOW + line) + elif 'Error: ' in line: + lines.append(self.colors.RED + line) + errors.append(line) + elif 'note: ' in line: + lines.append(self.colors.CYAN + line) + else: + lines.append(line) + rec.msg = "\n".join(lines) + frame = frame.f_back + return Logs.formatter.format(self, rec) + +def options(opt): + Logs.log.handlers[0].setFormatter(RcvtFormatter(Logs.colors)) + diff --git a/third_party/waf/waflib/extras/compat15.py b/third_party/waf/waflib/extras/compat15.py new file mode 100644 index 0000000..0e74df8 --- /dev/null +++ b/third_party/waf/waflib/extras/compat15.py @@ -0,0 +1,406 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010 (ita) + +""" +This file is provided to enable compatibility with waf 1.5 +It was enabled by default in waf 1.6, but it is not used in waf 1.7 +""" + +import sys +from waflib import ConfigSet, Logs, Options, Scripting, Task, Build, Configure, Node, Runner, TaskGen, Utils, Errors, Context + +# the following is to bring some compatibility with waf 1.5 "import waflib.Configure → import Configure" +sys.modules['Environment'] = ConfigSet +ConfigSet.Environment = ConfigSet.ConfigSet + +sys.modules['Logs'] = Logs +sys.modules['Options'] = Options +sys.modules['Scripting'] = Scripting +sys.modules['Task'] = Task +sys.modules['Build'] = Build +sys.modules['Configure'] = Configure +sys.modules['Node'] = Node +sys.modules['Runner'] = Runner +sys.modules['TaskGen'] = TaskGen +sys.modules['Utils'] = Utils +sys.modules['Constants'] = Context +Context.SRCDIR = '' +Context.BLDDIR = '' + +from waflib.Tools import c_preproc +sys.modules['preproc'] = c_preproc + +from waflib.Tools import c_config +sys.modules['config_c'] = c_config + +ConfigSet.ConfigSet.copy = ConfigSet.ConfigSet.derive +ConfigSet.ConfigSet.set_variant = Utils.nada + +Utils.pproc = Utils.subprocess + +Build.BuildContext.add_subdirs = Build.BuildContext.recurse +Build.BuildContext.new_task_gen = Build.BuildContext.__call__ +Build.BuildContext.is_install = 0 +Node.Node.relpath_gen = Node.Node.path_from + +Utils.pproc = Utils.subprocess +Utils.get_term_cols = Logs.get_term_cols + +def cmd_output(cmd, **kw): + + silent = False + if 'silent' in kw: + silent = kw['silent'] + del(kw['silent']) + + if 'e' in kw: + tmp = kw['e'] + del(kw['e']) + kw['env'] = tmp + + kw['shell'] = isinstance(cmd, str) + kw['stdout'] = Utils.subprocess.PIPE + if silent: + kw['stderr'] = Utils.subprocess.PIPE + + try: + p = Utils.subprocess.Popen(cmd, **kw) + output = p.communicate()[0] + except OSError as e: + raise ValueError(str(e)) + + if p.returncode: + if not silent: + msg = "command execution failed: %s -> %r" % (cmd, str(output)) + raise ValueError(msg) + output = '' + return output +Utils.cmd_output = cmd_output + +def name_to_obj(self, s, env=None): + if Logs.verbose: + Logs.warn('compat: change "name_to_obj(name, env)" by "get_tgen_by_name(name)"') + return self.get_tgen_by_name(s) +Build.BuildContext.name_to_obj = name_to_obj + +def env_of_name(self, name): + try: + return self.all_envs[name] + except KeyError: + Logs.error('no such environment: '+name) + return None +Build.BuildContext.env_of_name = env_of_name + + +def set_env_name(self, name, env): + self.all_envs[name] = env + return env +Configure.ConfigurationContext.set_env_name = set_env_name + +def retrieve(self, name, fromenv=None): + try: + env = self.all_envs[name] + except KeyError: + env = ConfigSet.ConfigSet() + self.prepare_env(env) + self.all_envs[name] = env + else: + if fromenv: + Logs.warn('The environment %s may have been configured already', name) + return env +Configure.ConfigurationContext.retrieve = retrieve + +Configure.ConfigurationContext.sub_config = Configure.ConfigurationContext.recurse +Configure.ConfigurationContext.check_tool = Configure.ConfigurationContext.load +Configure.conftest = Configure.conf +Configure.ConfigurationError = Errors.ConfigurationError +Utils.WafError = Errors.WafError + +Options.OptionsContext.sub_options = Options.OptionsContext.recurse +Options.OptionsContext.tool_options = Context.Context.load +Options.Handler = Options.OptionsContext + +Task.simple_task_type = Task.task_type_from_func = 
Task.task_factory +Task.Task.classes = Task.classes + +def setitem(self, key, value): + if key.startswith('CCFLAGS'): + key = key[1:] + self.table[key] = value +ConfigSet.ConfigSet.__setitem__ = setitem + +@TaskGen.feature('d') +@TaskGen.before('apply_incpaths') +def old_importpaths(self): + if getattr(self, 'importpaths', []): + self.includes = self.importpaths + +from waflib import Context +eld = Context.load_tool +def load_tool(*k, **kw): + ret = eld(*k, **kw) + if 'set_options' in ret.__dict__: + if Logs.verbose: + Logs.warn('compat: rename "set_options" to options') + ret.options = ret.set_options + if 'detect' in ret.__dict__: + if Logs.verbose: + Logs.warn('compat: rename "detect" to "configure"') + ret.configure = ret.detect + return ret +Context.load_tool = load_tool + +def get_curdir(self): + return self.path.abspath() +Context.Context.curdir = property(get_curdir, Utils.nada) + +def get_srcdir(self): + return self.srcnode.abspath() +Configure.ConfigurationContext.srcdir = property(get_srcdir, Utils.nada) + +def get_blddir(self): + return self.bldnode.abspath() +Configure.ConfigurationContext.blddir = property(get_blddir, Utils.nada) + +Configure.ConfigurationContext.check_message_1 = Configure.ConfigurationContext.start_msg +Configure.ConfigurationContext.check_message_2 = Configure.ConfigurationContext.end_msg + +rev = Context.load_module +def load_module(path, encoding=None): + ret = rev(path, encoding) + if 'set_options' in ret.__dict__: + if Logs.verbose: + Logs.warn('compat: rename "set_options" to "options" (%r)', path) + ret.options = ret.set_options + if 'srcdir' in ret.__dict__: + if Logs.verbose: + Logs.warn('compat: rename "srcdir" to "top" (%r)', path) + ret.top = ret.srcdir + if 'blddir' in ret.__dict__: + if Logs.verbose: + Logs.warn('compat: rename "blddir" to "out" (%r)', path) + ret.out = ret.blddir + Utils.g_module = Context.g_module + Options.launch_dir = Context.launch_dir + return ret +Context.load_module = load_module + +old_post = TaskGen.task_gen.post +def post(self): + self.features = self.to_list(self.features) + if 'cc' in self.features: + if Logs.verbose: + Logs.warn('compat: the feature cc does not exist anymore (use "c")') + self.features.remove('cc') + self.features.append('c') + if 'cstaticlib' in self.features: + if Logs.verbose: + Logs.warn('compat: the feature cstaticlib does not exist anymore (use "cstlib" or "cxxstlib")') + self.features.remove('cstaticlib') + self.features.append(('cxx' in self.features) and 'cxxstlib' or 'cstlib') + if getattr(self, 'ccflags', None): + if Logs.verbose: + Logs.warn('compat: "ccflags" was renamed to "cflags"') + self.cflags = self.ccflags + return old_post(self) +TaskGen.task_gen.post = post + +def waf_version(*k, **kw): + Logs.warn('wrong version (waf_version was removed in waf 1.6)') +Utils.waf_version = waf_version + + +import os +@TaskGen.feature('c', 'cxx', 'd') +@TaskGen.before('apply_incpaths', 'propagate_uselib_vars') +@TaskGen.after('apply_link', 'process_source') +def apply_uselib_local(self): + """ + process the uselib_local attribute + execute after apply_link because of the execution order set on 'link_task' + """ + env = self.env + from waflib.Tools.ccroot import stlink_task + + # 1. 
the case of the libs defined in the project (visit ancestors first) + # the ancestors external libraries (uselib) will be prepended + self.uselib = self.to_list(getattr(self, 'uselib', [])) + self.includes = self.to_list(getattr(self, 'includes', [])) + names = self.to_list(getattr(self, 'uselib_local', [])) + get = self.bld.get_tgen_by_name + seen = set() + seen_uselib = set() + tmp = Utils.deque(names) # consume a copy of the list of names + if tmp: + if Logs.verbose: + Logs.warn('compat: "uselib_local" is deprecated, replace by "use"') + while tmp: + lib_name = tmp.popleft() + # visit dependencies only once + if lib_name in seen: + continue + + y = get(lib_name) + y.post() + seen.add(lib_name) + + # object has ancestors to process (shared libraries): add them to the end of the list + if getattr(y, 'uselib_local', None): + for x in self.to_list(getattr(y, 'uselib_local', [])): + obj = get(x) + obj.post() + if getattr(obj, 'link_task', None): + if not isinstance(obj.link_task, stlink_task): + tmp.append(x) + + # link task and flags + if getattr(y, 'link_task', None): + + link_name = y.target[y.target.rfind(os.sep) + 1:] + if isinstance(y.link_task, stlink_task): + env.append_value('STLIB', [link_name]) + else: + # some linkers can link against programs + env.append_value('LIB', [link_name]) + + # the order + self.link_task.set_run_after(y.link_task) + + # for the recompilation + self.link_task.dep_nodes += y.link_task.outputs + + # add the link path too + tmp_path = y.link_task.outputs[0].parent.bldpath() + if not tmp_path in env['LIBPATH']: + env.prepend_value('LIBPATH', [tmp_path]) + + # add ancestors uselib too - but only propagate those that have no staticlib defined + for v in self.to_list(getattr(y, 'uselib', [])): + if v not in seen_uselib: + seen_uselib.add(v) + if not env['STLIB_' + v]: + if not v in self.uselib: + self.uselib.insert(0, v) + + # if the library task generator provides 'export_includes', add to the include path + # the export_includes must be a list of paths relative to the other library + if getattr(y, 'export_includes', None): + self.includes.extend(y.to_incnodes(y.export_includes)) + +@TaskGen.feature('cprogram', 'cxxprogram', 'cstlib', 'cxxstlib', 'cshlib', 'cxxshlib', 'dprogram', 'dstlib', 'dshlib') +@TaskGen.after('apply_link') +def apply_objdeps(self): + "add the .o files produced by some other object files in the same manner as uselib_local" + names = getattr(self, 'add_objects', []) + if not names: + return + names = self.to_list(names) + + get = self.bld.get_tgen_by_name + seen = [] + while names: + x = names[0] + + # visit dependencies only once + if x in seen: + names = names[1:] + continue + + # object does not exist ? + y = get(x) + + # object has ancestors to process first ? 
update the list of names
+		if getattr(y, 'add_objects', None):
+			added = 0
+			lst = y.to_list(y.add_objects)
+			lst.reverse()
+			for u in lst:
+				if u in seen:
+					continue
+				added = 1
+				names = [u]+names
+			if added:
+				continue # list of names modified, loop
+
+		# safe to process the current object
+		y.post()
+		seen.append(x)
+
+		for t in getattr(y, 'compiled_tasks', []):
+			self.link_task.inputs.extend(t.outputs)
+
+@TaskGen.after('apply_link')
+def process_obj_files(self):
+	if not hasattr(self, 'obj_files'):
+		return
+	for x in self.obj_files:
+		node = self.path.find_resource(x)
+		self.link_task.inputs.append(node)
+
+@TaskGen.taskgen_method
+def add_obj_file(self, file):
+	"""Small example on how to link object files as if they were source
+	obj = bld.create_obj('cc')
+	obj.add_obj_file('foo.o')"""
+	if not hasattr(self, 'obj_files'):
+		self.obj_files = []
+	if not 'process_obj_files' in self.meths:
+		self.meths.append('process_obj_files')
+	self.obj_files.append(file)
+
+
+old_define = Configure.ConfigurationContext.__dict__['define']
+
+@Configure.conf
+def define(self, key, val, quote=True, comment=''):
+	old_define(self, key, val, quote, comment)
+	if key.startswith('HAVE_'):
+		self.env[key] = 1
+
+old_undefine = Configure.ConfigurationContext.__dict__['undefine']
+
+@Configure.conf
+def undefine(self, key, comment=''):
+	old_undefine(self, key, comment)
+	if key.startswith('HAVE_'):
+		self.env[key] = 0
+
+# some people might want to use export_incdirs, but it was renamed
+def set_incdirs(self, val):
+	Logs.warn('compat: replace "export_incdirs" with "export_includes"')
+	self.export_includes = val
+TaskGen.task_gen.export_incdirs = property(None, set_incdirs)
+
+def install_dir(self, path):
+	if not path:
+		return []
+
+	destpath = Utils.subst_vars(path, self.env)
+
+	if self.is_install > 0:
+		Logs.info('* creating %s', destpath)
+		Utils.check_dir(destpath)
+	elif self.is_install < 0:
+		Logs.info('* removing %s', destpath)
+		try:
+			os.remove(destpath)
+		except OSError:
+			pass
+Build.BuildContext.install_dir = install_dir
+
+# before/after names
+repl = {'apply_core': 'process_source',
+	'apply_lib_vars': 'process_source',
+	'apply_obj_vars': 'propagate_uselib_vars',
+	'exec_rule': 'process_rule'
+}
+def after(*k):
+	k = [repl.get(key, key) for key in k]
+	return TaskGen.after_method(*k)
+
+def before(*k):
+	k = [repl.get(key, key) for key in k]
+	return TaskGen.before_method(*k)
+TaskGen.after = after
+TaskGen.before = before
+
diff --git a/third_party/waf/waflib/extras/cppcheck.py b/third_party/waf/waflib/extras/cppcheck.py
new file mode 100644
index 0000000..13ff424
--- /dev/null
+++ b/third_party/waf/waflib/extras/cppcheck.py
@@ -0,0 +1,591 @@
+#! /usr/bin/env python
+# -*- encoding: utf-8 -*-
+# Michel Mooij, michel.mooij7@gmail.com
+
+"""
+Tool Description
+================
+This module provides a waf wrapper (i.e. waftool) around the C/C++ source code
+checking tool 'cppcheck'.
+
+See http://cppcheck.sourceforge.net/ for more information on the cppcheck tool
+itself.
+Note that many Linux distributions already provide a ready-to-install version
+of cppcheck. On Fedora, for instance, it can be installed using yum:
+
+	'sudo yum install cppcheck'
+
+
+Usage
+=====
+In order to use this waftool simply add it to the 'options' and 'configure'
+functions of your main waf script as shown in the example below:
+
+	def options(opt):
+		opt.load('cppcheck', tooldir='./waftools')
+
+	def configure(conf):
+		conf.load('cppcheck')
+
+Note that the example shown above assumes that the cppcheck waftool is located
+in the sub directory named 'waftools'.
+
+When configured as shown in the example above, cppcheck will automatically
+perform a source code analysis on all C/C++ build tasks that have been
+defined in your waf build system.
+
+The example shown below for a C program will be used as input for cppcheck when
+building the task.
+
+	def build(bld):
+		bld.program(name='foo', src='foobar.c')
+
+The result of the source code analysis will be stored both as xml and html
+files in the build location for the task. Should any error be detected by
+cppcheck the build will be aborted and a link to the html report will be shown.
+By default, one index.html file is created for each task generator. A global
+index.html file can be obtained by setting the following variable
+in the configuration section:
+
+	conf.env.CPPCHECK_SINGLE_HTML = True
+
+When needed, source code checking by cppcheck can be disabled per task, or per
+detected error or warning for a particular task. It can also be disabled for
+all tasks.
+
+In order to exclude a task from source code checking add the skip option to the
+task as shown below:
+
+	def build(bld):
+		bld.program(
+				name='foo',
+				src='foobar.c',
+				cppcheck_skip=True
+		)
+
+When needed, problems detected by cppcheck may be suppressed using a file
+containing a list of suppression rules. The relative or absolute path to this
+file can be added to the build task as shown in the example below:
+
+	bld.program(
+			name='bar',
+			src='foobar.c',
+			cppcheck_suppress='bar.suppress'
+	)
+
+A cppcheck suppress file should contain one suppress rule per line. Each of
+these rules will be passed as an '--suppress=<rule>' argument to cppcheck.
+
+Dependencies
+================
+This waftool depends on the Python 'pygments' module; it is used for source code
+syntax highlighting when creating the html reports. See http://pygments.org/ for
+more information on this package.
+
+Remarks
+================
+The generation of the html report is originally based on the cppcheck-htmlreport.py
+script that comes shipped with the cppcheck tool.
+"""
+
+import sys
+import xml.etree.ElementTree as ElementTree
+from waflib import Task, TaskGen, Logs, Context, Options
+
+PYGMENTS_EXC_MSG = '''
+The required module 'pygments' could not be found. Please install it using your
+platform package manager (e.g. apt-get or yum), using 'pip' or 'easy_install',
+see 'http://pygments.org/download/' for installation instructions.
+'''
+
+try:
+	import pygments
+	from pygments import formatters, lexers
+except ImportError as e:
+	Logs.warn(PYGMENTS_EXC_MSG)
+	raise e
+
+
+def options(opt):
+	opt.add_option('--cppcheck-skip', dest='cppcheck_skip',
+		default=False, action='store_true',
+		help='do not check C/C++ sources (default=False)')
+
+	opt.add_option('--cppcheck-err-resume', dest='cppcheck_err_resume',
+		default=False, action='store_true',
+		help='continue in case of errors (default=False)')
+
+	opt.add_option('--cppcheck-bin-enable', dest='cppcheck_bin_enable',
+		default='warning,performance,portability,style,unusedFunction', action='store',
+		help="cppcheck option '--enable=' for binaries (default=warning,performance,portability,style,unusedFunction)")
+
+	opt.add_option('--cppcheck-lib-enable', dest='cppcheck_lib_enable',
+		default='warning,performance,portability,style', action='store',
+		help="cppcheck option '--enable=' for libraries (default=warning,performance,portability,style)")
+
+	opt.add_option('--cppcheck-std-c', dest='cppcheck_std_c',
+		default='c99', action='store',
+		help='cppcheck standard to use when checking C (default=c99)')
+
+	opt.add_option('--cppcheck-std-cxx', dest='cppcheck_std_cxx',
+		default='c++03', action='store',
+		help='cppcheck standard to use when checking C++ (default=c++03)')
+
+	opt.add_option('--cppcheck-check-config', dest='cppcheck_check_config',
+		default=False, action='store_true',
+		help='forced check for missing built-in include files, e.g. stdio.h (default=False)')
+
+	opt.add_option('--cppcheck-max-configs', dest='cppcheck_max_configs',
+		default='20', action='store',
+		help='maximum preprocessor (--max-configs) define iterations (default=20)')
+
+	opt.add_option('--cppcheck-jobs', dest='cppcheck_jobs',
+		default='1', action='store',
+		help='number of jobs (-j) to do the checking work (default=1)')
+
+def configure(conf):
+	if conf.options.cppcheck_skip:
+		conf.env.CPPCHECK_SKIP = [True]
+	conf.env.CPPCHECK_STD_C = conf.options.cppcheck_std_c
+	conf.env.CPPCHECK_STD_CXX = conf.options.cppcheck_std_cxx
+	conf.env.CPPCHECK_MAX_CONFIGS = conf.options.cppcheck_max_configs
+	conf.env.CPPCHECK_BIN_ENABLE = conf.options.cppcheck_bin_enable
+	conf.env.CPPCHECK_LIB_ENABLE = conf.options.cppcheck_lib_enable
+	conf.env.CPPCHECK_JOBS = conf.options.cppcheck_jobs
+	if conf.options.cppcheck_jobs != '1' and ('unusedFunction' in conf.options.cppcheck_bin_enable or 'unusedFunction' in conf.options.cppcheck_lib_enable or 'all' in conf.options.cppcheck_bin_enable or 'all' in conf.options.cppcheck_lib_enable):
+		Logs.warn('cppcheck: unusedFunction cannot be used with multiple threads, cppcheck will disable it automatically')
+	conf.find_program('cppcheck', var='CPPCHECK')
+
+	# set to True to get a single index.html file
+	conf.env.CPPCHECK_SINGLE_HTML = False
+
+@TaskGen.feature('c')
+@TaskGen.feature('cxx')
+def cppcheck_execute(self):
+	if hasattr(self.bld, 'conf'):
+		return
+	if len(self.env.CPPCHECK_SKIP) or Options.options.cppcheck_skip:
+		return
+	if getattr(self, 'cppcheck_skip', False):
+		return
+	task = self.create_task('cppcheck')
+	task.cmd = _tgen_create_cmd(self)
+	task.fatal = []
+	if not Options.options.cppcheck_err_resume:
+		task.fatal.append('error')
+
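+# For orientation only: for a task generator with features='c cprogram', the
+# helper below would typically assemble a command along these lines (the paths
+# and defaults shown are illustrative, not literal output):
+#
+#	cppcheck --inconclusive --report-progress --verbose --xml --xml-version=2
+#		--max-configs=20 -j 1 --language=c --std=c99
+#		--enable=warning,performance,portability,style,unusedFunction
+#		foobar.c -Iinclude
+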
+def _tgen_create_cmd(self):
+	features = getattr(self, 'features', [])
+	std_c = self.env.CPPCHECK_STD_C
+	std_cxx = self.env.CPPCHECK_STD_CXX
+	max_configs = self.env.CPPCHECK_MAX_CONFIGS
+	bin_enable = self.env.CPPCHECK_BIN_ENABLE
+	lib_enable = self.env.CPPCHECK_LIB_ENABLE
+	jobs = self.env.CPPCHECK_JOBS
+
+	cmd = self.env.CPPCHECK
+	args = ['--inconclusive','--report-progress','--verbose','--xml','--xml-version=2']
+	args.append('--max-configs=%s' % max_configs)
+	args.append('-j %s' % jobs)
+
+	if 'cxx' in features:
+		args.append('--language=c++')
+		args.append('--std=%s' % std_cxx)
+	else:
+		args.append('--language=c')
+		args.append('--std=%s' % std_c)
+
+	if Options.options.cppcheck_check_config:
+		args.append('--check-config')
+
+	if set(['cprogram','cxxprogram']) & set(features):
+		args.append('--enable=%s' % bin_enable)
+	else:
+		args.append('--enable=%s' % lib_enable)
+
+	for src in self.to_list(getattr(self, 'source', [])):
+		if not isinstance(src, str):
+			src = repr(src)
+		args.append(src)
+	for inc in self.to_incnodes(self.to_list(getattr(self, 'includes', []))):
+		if not isinstance(inc, str):
+			inc = repr(inc)
+		args.append('-I%s' % inc)
+	for inc in self.to_incnodes(self.to_list(self.env.INCLUDES)):
+		if not isinstance(inc, str):
+			inc = repr(inc)
+		args.append('-I%s' % inc)
+	return cmd + args
+
+
+class cppcheck(Task.Task):
+	quiet = True
+
+	def run(self):
+		stderr = self.generator.bld.cmd_and_log(self.cmd, quiet=Context.STDERR, output=Context.STDERR)
+		self._save_xml_report(stderr)
+		defects = self._get_defects(stderr)
+		index = self._create_html_report(defects)
+		self._errors_evaluate(defects, index)
+		return 0
+
+	def _save_xml_report(self, s):
+		'''use the cppcheck xml result string, add the command string used to
+		invoke cppcheck and save it as an xml file.
+		'''
+		header = '%s\n' % s.splitlines()[0]
+		root = ElementTree.fromstring(s)
+		cmd = ElementTree.SubElement(root.find('cppcheck'), 'cmd')
+		cmd.text = str(self.cmd)
+		body = ElementTree.tostring(root).decode('us-ascii')
+		body_html_name = 'cppcheck-%s.xml' % self.generator.get_name()
+		if self.env.CPPCHECK_SINGLE_HTML:
+			body_html_name = 'cppcheck.xml'
+		node = self.generator.path.get_bld().find_or_declare(body_html_name)
+		node.write(header + body)
+
+	def _get_defects(self, xml_string):
+		'''evaluate the xml string returned by cppcheck (on stderr) and use it to create
+		a list of defects.
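+
+		As a purely illustrative sketch (attribute names follow the cppcheck
+		xml version 2 format; the values are made up), an element such as:
+
+			<error id="nullPointer" severity="error" msg="..." verbose="...">
+				<location file="foo.c" line="42"/>
+			</error>
+
+		yields a defect dictionary with the keys 'id', 'severity', 'msg',
+		'verbose', 'file' and 'line'.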
+		'''
+		defects = []
+		for error in ElementTree.fromstring(xml_string).iter('error'):
+			defect = {}
+			defect['id'] = error.get('id')
+			defect['severity'] = error.get('severity')
+			defect['msg'] = str(error.get('msg')).replace('&lt;','<')	# un-escape '<' in the cppcheck xml output
+			defect['verbose'] = error.get('verbose')
+			for location in error.findall('location'):
+				defect['file'] = location.get('file')
+				defect['line'] = str(int(location.get('line')) - 1)
+			defects.append(defect)
+		return defects
+
+	def _create_html_report(self, defects):
+		files, css_style_defs = self._create_html_files(defects)
+		index = self._create_html_index(files)
+		self._create_css_file(css_style_defs)
+		return index
+
+	def _create_html_files(self, defects):
+		sources = {}
+		defects = [defect for defect in defects if 'file' in defect]
+		for defect in defects:
+			name = defect['file']
+			if not name in sources:
+				sources[name] = [defect]
+			else:
+				sources[name].append(defect)
+
+		files = {}
+		css_style_defs = None
+		bpath = self.generator.path.get_bld().abspath()
+		names = list(sources.keys())
+		for i in range(0,len(names)):
+			name = names[i]
+			if self.env.CPPCHECK_SINGLE_HTML:
+				htmlfile = 'cppcheck/%i.html' % (i)
+			else:
+				htmlfile = 'cppcheck/%s%i.html' % (self.generator.get_name(),i)
+			errors = sources[name]
+			files[name] = { 'htmlfile': '%s/%s' % (bpath, htmlfile), 'errors': errors }
+			css_style_defs = self._create_html_file(name, htmlfile, errors)
+		return files, css_style_defs
+
+	def _create_html_file(self, sourcefile, htmlfile, errors):
+		name = self.generator.get_name()
+		root = ElementTree.fromstring(CPPCHECK_HTML_FILE)
+		title = root.find('head/title')
+		title.text = 'cppcheck - report - %s' % name
+
+		body = root.find('body')
+		for div in body.findall('div'):
+			if div.get('id') == 'page':
+				page = div
+				break
+		for div in page.findall('div'):
+			if div.get('id') == 'header':
+				h1 = div.find('h1')
+				h1.text = 'cppcheck report - %s' % name
+			if div.get('id') == 'menu':
+				indexlink = div.find('a')
+				if self.env.CPPCHECK_SINGLE_HTML:
+					indexlink.attrib['href'] = 'index.html'
+				else:
+					indexlink.attrib['href'] = 'index-%s.html' % name
+			if div.get('id') == 'content':
+				content = div
+				srcnode = self.generator.bld.root.find_node(sourcefile)
+				hl_lines = [e['line'] for e in errors if 'line' in e]
+				formatter = CppcheckHtmlFormatter(linenos=True, style='colorful', hl_lines=hl_lines, lineanchors='line')
+				formatter.errors = [e for e in errors if 'line' in e]
+				css_style_defs = formatter.get_style_defs('.highlight')
+				lexer = pygments.lexers.guess_lexer_for_filename(sourcefile, "")
+				s = pygments.highlight(srcnode.read(), lexer, formatter)
+				table = ElementTree.fromstring(s)
+				content.append(table)
+
+		s = ElementTree.tostring(root, method='html').decode('us-ascii')
+		s = CCPCHECK_HTML_TYPE + s
+		node = self.generator.path.get_bld().find_or_declare(htmlfile)
+		node.write(s)
+		return css_style_defs
+
+	def _create_html_index(self, files):
+		name = self.generator.get_name()
+		root = ElementTree.fromstring(CPPCHECK_HTML_FILE)
+		title = root.find('head/title')
+		title.text = 'cppcheck - report - %s' % name
+
+		body = root.find('body')
+		for div in body.findall('div'):
+			if div.get('id') == 'page':
+				page = div
+				break
+		for div in page.findall('div'):
+			if div.get('id') == 'header':
+				h1 = div.find('h1')
+				h1.text = 'cppcheck report - %s' % name
+			if div.get('id') == 'content':
+				content = div
+				self._create_html_table(content, files)
+			if div.get('id') == 'menu':
+				indexlink = div.find('a')
+				if self.env.CPPCHECK_SINGLE_HTML:
+					indexlink.attrib['href'] = 'index.html'
+				else:
+					indexlink.attrib['href'] = 'index-%s.html' % name
+
+		s = ElementTree.tostring(root, method='html').decode('us-ascii')
+		s = CCPCHECK_HTML_TYPE + s
+		index_html_name = 'cppcheck/index-%s.html' % name
+		if self.env.CPPCHECK_SINGLE_HTML:
+			index_html_name = 'cppcheck/index.html'
+		node = self.generator.path.get_bld().find_or_declare(index_html_name)
+		node.write(s)
+		return node
+
+	def _create_html_table(self, content, files):
+		table = ElementTree.fromstring(CPPCHECK_HTML_TABLE)
+		for name, val in files.items():
+			f = val['htmlfile']
+			s = '<tr><td colspan="4"><a href="%s">%s</a></td></tr>\n' % (f,name)
+			row = ElementTree.fromstring(s)
+			table.append(row)
+
+			errors = sorted(val['errors'], key=lambda e: int(e['line']) if 'line' in e else sys.maxsize)
+			for e in errors:
+				if not 'line' in e:
+					s = '<tr><td></td><td>%s</td><td>%s</td><td>%s</td></tr>\n' % (e['id'], e['severity'], e['msg'])
+				else:
+					attr = ''
+					if e['severity'] == 'error':
+						attr = 'class="error"'
+					s = '<tr><td><a href="%s#line-%s">%s</a></td>' % (f, e['line'], e['line'])
+					s+= '<td>%s</td><td>%s</td><td %s>%s</td></tr>\n' % (e['id'], e['severity'], attr, e['msg'])
+				row = ElementTree.fromstring(s)
+				table.append(row)
+		content.append(table)
+
+	def _create_css_file(self, css_style_defs):
+		css = str(CPPCHECK_CSS_FILE)
+		if css_style_defs:
+			css = "%s\n%s\n" % (css, css_style_defs)
+		node = self.generator.path.get_bld().find_or_declare('cppcheck/style.css')
+		node.write(css)
+
+	def _errors_evaluate(self, errors, http_index):
+		name = self.generator.get_name()
+		fatal = self.fatal
+		severity = [err['severity'] for err in errors]
+		problems = [err for err in errors if err['severity'] != 'information']
+
+		if set(fatal) & set(severity):
+			exc = "\n"
+			exc += "\ncppcheck detected fatal error(s) in task '%s', see report for details:" % name
+			exc += "\n    file://%r" % (http_index)
+			exc += "\n"
+			self.generator.bld.fatal(exc)
+
+		elif len(problems):
+			msg = "\ncppcheck detected (possible) problem(s) in task '%s', see report for details:" % name
+			msg += "\n    file://%r" % http_index
+			msg += "\n"
+			Logs.error(msg)
+
+
+class CppcheckHtmlFormatter(pygments.formatters.HtmlFormatter):
+	errors = []
+
+	def wrap(self, source, outfile):
+		line_no = 1
+		for i, t in super(CppcheckHtmlFormatter, self).wrap(source, outfile):
+			# If this is a source code line we want to add a span tag at the end.
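+			# (pygments' HtmlFormatter.wrap() yields (code, text) tuples in
+			# which code == 1 marks a formatted source code line; only those
+			# advance the line counter used to match the defect line numbers.)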
+ if i == 1: + for error in self.errors: + if int(error['line']) == line_no: + t = t.replace('\n', CPPCHECK_HTML_ERROR % error['msg']) + line_no += 1 + yield i, t + + +CCPCHECK_HTML_TYPE = \ +'<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">\n' + +CPPCHECK_HTML_FILE = """ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd" [<!ENTITY nbsp " ">]> +<html> + <head> + <title>cppcheck - report - XXX</title> + <link href="style.css" rel="stylesheet" type="text/css" /> + <style type="text/css"> + </style> + </head> + <body class="body"> + <div id="page-header"> </div> + <div id="page"> + <div id="header"> + <h1>cppcheck report - XXX</h1> + </div> + <div id="menu"> + <a href="index.html">Defect list</a> + </div> + <div id="content"> + </div> + <div id="footer"> + <div>cppcheck - a tool for static C/C++ code analysis</div> + <div> + Internet: <a href="http://cppcheck.sourceforge.net">http://cppcheck.sourceforge.net</a><br/> + Forum: <a href="http://apps.sourceforge.net/phpbb/cppcheck/">http://apps.sourceforge.net/phpbb/cppcheck/</a><br/> + IRC: #cppcheck at irc.freenode.net + </div> + + </div> + + </div> + <div id="page-footer"> </div> + </body> +</html> +""" + +CPPCHECK_HTML_TABLE = """ +<table> + <tr> + <th>Line</th> + <th>Id</th> + <th>Severity</th> + <th>Message</th> + </tr> +</table> +""" + +CPPCHECK_HTML_ERROR = \ +'<span style="background: #ffaaaa;padding: 3px;"><--- %s</span>\n' + +CPPCHECK_CSS_FILE = """ +body.body { + font-family: Arial; + font-size: 13px; + background-color: black; + padding: 0px; + margin: 0px; +} + +.error { + font-family: Arial; + font-size: 13px; + background-color: #ffb7b7; + padding: 0px; + margin: 0px; +} + +th, td { + min-width: 100px; + text-align: left; +} + +#page-header { + clear: both; + width: 1200px; + margin: 20px auto 0px auto; + height: 10px; + border-bottom-width: 2px; + border-bottom-style: solid; + border-bottom-color: #aaaaaa; +} + +#page { + width: 1160px; + margin: auto; + border-left-width: 2px; + border-left-style: solid; + border-left-color: #aaaaaa; + border-right-width: 2px; + border-right-style: solid; + border-right-color: #aaaaaa; + background-color: White; + padding: 20px; +} + +#page-footer { + clear: both; + width: 1200px; + margin: auto; + height: 10px; + border-top-width: 2px; + border-top-style: solid; + border-top-color: #aaaaaa; +} + +#header { + width: 100%; + height: 70px; + background-image: url(logo.png); + background-repeat: no-repeat; + background-position: left top; + border-bottom-style: solid; + border-bottom-width: thin; + border-bottom-color: #aaaaaa; +} + +#menu { + margin-top: 5px; + text-align: left; + float: left; + width: 100px; + height: 300px; +} + +#menu > a { + margin-left: 10px; + display: block; +} + +#content { + float: left; + width: 1020px; + margin: 5px; + padding: 0px 10px 10px 10px; + border-left-style: solid; + border-left-width: thin; + border-left-color: #aaaaaa; +} + +#footer { + padding-bottom: 5px; + padding-top: 5px; + border-top-style: solid; + border-top-width: thin; + border-top-color: #aaaaaa; + clear: both; + font-size: 10px; +} + +#footer > div { + float: left; + width: 33%; +} + +""" + diff --git a/third_party/waf/waflib/extras/cpplint.py b/third_party/waf/waflib/extras/cpplint.py new file mode 100644 index 0000000..afc09c9 --- /dev/null +++ b/third_party/waf/waflib/extras/cpplint.py @@ -0,0 +1,209 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# +# written by Sylvain Rouquette, 2014 + +''' + +This is an extra tool, not bundled with the default waf binary. +To add the cpplint tool to the waf file: +$ ./waf-light --tools=compat15,cpplint + +this tool also requires cpplint for python. +If you have PIP, you can install it like this: pip install cpplint + +When using this tool, the wscript will look like: + + def options(opt): + opt.load('compiler_cxx cpplint') + + def configure(conf): + conf.load('compiler_cxx cpplint') + # optional, you can also specify them on the command line + conf.env.CPPLINT_FILTERS = ','.join(( + '-whitespace/newline', # c++11 lambda + '-readability/braces', # c++11 constructor + '-whitespace/braces', # c++11 constructor + '-build/storage_class', # c++11 for-range + '-whitespace/blank_line', # user pref + '-whitespace/labels' # user pref + )) + + def build(bld): + bld(features='cpplint', source='main.cpp', target='app') + # add include files, because they aren't usually built + bld(features='cpplint', source=bld.path.ant_glob('**/*.hpp')) +''' + +from __future__ import absolute_import +import sys, re +import logging +from waflib import Errors, Task, TaskGen, Logs, Options, Node, Utils + + +critical_errors = 0 +CPPLINT_FORMAT = '[CPPLINT] %(filename)s:\nline %(linenum)s, severity %(confidence)s, category: %(category)s\n%(message)s\n' +RE_EMACS = re.compile(r'(?P<filename>.*):(?P<linenum>\d+): (?P<message>.*) \[(?P<category>.*)\] \[(?P<confidence>\d+)\]') +CPPLINT_RE = { + 'waf': RE_EMACS, + 'emacs': RE_EMACS, + 'vs7': re.compile(r'(?P<filename>.*)\((?P<linenum>\d+)\): (?P<message>.*) \[(?P<category>.*)\] \[(?P<confidence>\d+)\]'), + 'eclipse': re.compile(r'(?P<filename>.*):(?P<linenum>\d+): warning: (?P<message>.*) \[(?P<category>.*)\] \[(?P<confidence>\d+)\]'), +} +CPPLINT_STR = ('${CPPLINT} ' + '--verbose=${CPPLINT_LEVEL} ' + '--output=${CPPLINT_OUTPUT} ' + '--filter=${CPPLINT_FILTERS} ' + '--root=${CPPLINT_ROOT} ' + '--linelength=${CPPLINT_LINE_LENGTH} ') + + +def options(opt): + opt.add_option('--cpplint-filters', type='string', + default='', dest='CPPLINT_FILTERS', + help='add filters to cpplint') + opt.add_option('--cpplint-length', type='int', + default=80, dest='CPPLINT_LINE_LENGTH', + help='specify the line length (default: 80)') + opt.add_option('--cpplint-level', default=1, type='int', dest='CPPLINT_LEVEL', + help='specify the log level (default: 1)') + opt.add_option('--cpplint-break', default=5, type='int', dest='CPPLINT_BREAK', + help='break the build if error >= level (default: 5)') + opt.add_option('--cpplint-root', type='string', + default='', dest='CPPLINT_ROOT', + help='root directory used to derive header guard') + opt.add_option('--cpplint-skip', action='store_true', + default=False, dest='CPPLINT_SKIP', + help='skip cpplint during build') + opt.add_option('--cpplint-output', type='string', + default='waf', dest='CPPLINT_OUTPUT', + help='select output format (waf, emacs, vs7, eclipse)') + + +def configure(conf): + try: + conf.find_program('cpplint', var='CPPLINT') + except Errors.ConfigurationError: + conf.env.CPPLINT_SKIP = True + + +class cpplint_formatter(Logs.formatter, object): + def __init__(self, fmt): + logging.Formatter.__init__(self, CPPLINT_FORMAT) + self.fmt = fmt + + def format(self, rec): + if self.fmt == 'waf': + result = CPPLINT_RE[self.fmt].match(rec.msg).groupdict() + rec.msg = CPPLINT_FORMAT % result + if rec.levelno <= logging.INFO: + rec.c1 = Logs.colors.CYAN + return super(cpplint_formatter, self).format(rec) + + +class 
cpplint_handler(Logs.log_handler, object): + def __init__(self, stream=sys.stderr, **kw): + super(cpplint_handler, self).__init__(stream, **kw) + self.stream = stream + + def emit(self, rec): + rec.stream = self.stream + self.emit_override(rec) + self.flush() + + +class cpplint_wrapper(object): + def __init__(self, logger, threshold, fmt): + self.logger = logger + self.threshold = threshold + self.fmt = fmt + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, traceback): + if isinstance(exc_value, Utils.subprocess.CalledProcessError): + messages = [m for m in exc_value.output.splitlines() + if 'Done processing' not in m + and 'Total errors found' not in m] + for message in messages: + self.write(message) + return True + + def write(self, message): + global critical_errors + result = CPPLINT_RE[self.fmt].match(message) + if not result: + return + level = int(result.groupdict()['confidence']) + if level >= self.threshold: + critical_errors += 1 + if level <= 2: + self.logger.info(message) + elif level <= 4: + self.logger.warning(message) + else: + self.logger.error(message) + + +cpplint_logger = None +def get_cpplint_logger(fmt): + global cpplint_logger + if cpplint_logger: + return cpplint_logger + cpplint_logger = logging.getLogger('cpplint') + hdlr = cpplint_handler() + hdlr.setFormatter(cpplint_formatter(fmt)) + cpplint_logger.addHandler(hdlr) + cpplint_logger.setLevel(logging.DEBUG) + return cpplint_logger + + +class cpplint(Task.Task): + color = 'PINK' + + def __init__(self, *k, **kw): + super(cpplint, self).__init__(*k, **kw) + + def run(self): + global critical_errors + with cpplint_wrapper(get_cpplint_logger(self.env.CPPLINT_OUTPUT), self.env.CPPLINT_BREAK, self.env.CPPLINT_OUTPUT): + params = {key: str(self.env[key]) for key in self.env if 'CPPLINT_' in key} + if params['CPPLINT_OUTPUT'] == 'waf': + params['CPPLINT_OUTPUT'] = 'emacs' + params['CPPLINT'] = self.env.get_flat('CPPLINT') + cmd = Utils.subst_vars(CPPLINT_STR, params) + env = self.env.env or None + Utils.subprocess.check_output(cmd + self.inputs[0].abspath(), + stderr=Utils.subprocess.STDOUT, + env=env, shell=True) + return critical_errors + +@TaskGen.extension('.h', '.hh', '.hpp', '.hxx') +def cpplint_includes(self, node): + pass + +@TaskGen.feature('cpplint') +@TaskGen.before_method('process_source') +def post_cpplint(self): + if not self.env.CPPLINT_INITIALIZED: + for key, value in Options.options.__dict__.items(): + if not key.startswith('CPPLINT_') or self.env[key]: + continue + self.env[key] = value + self.env.CPPLINT_INITIALIZED = True + + if self.env.CPPLINT_SKIP: + return + + if not self.env.CPPLINT_OUTPUT in CPPLINT_RE: + return + + for src in self.to_list(getattr(self, 'source', [])): + if isinstance(src, Node.Node): + node = src + else: + node = self.path.find_or_declare(src) + if not node: + self.bld.fatal('Could not find %r' % src) + self.create_task('cpplint', node) diff --git a/third_party/waf/waflib/extras/cross_gnu.py b/third_party/waf/waflib/extras/cross_gnu.py new file mode 100644 index 0000000..309f53b --- /dev/null +++ b/third_party/waf/waflib/extras/cross_gnu.py @@ -0,0 +1,227 @@ +#!/usr/bin/python +# -*- coding: utf-8 vi:ts=4:noexpandtab +# Tool to provide dedicated variables for cross-compilation + +__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2014" + +""" +This tool allows to use environment variables to define cross-compilation +variables intended for build variants. 
+ +The variables are obtained from the environment in 3 ways: + +1. By defining CHOST, they can be derived as ${CHOST}-${TOOL} +2. By defining HOST_x +3. By defining ${CHOST//-/_}_x + +else one can set ``cfg.env.CHOST`` in ``wscript`` before loading ``cross_gnu``. + +Usage: + +- In your build script:: + + def configure(cfg): + ... + for variant in x_variants: + setenv(variant) + conf.load('cross_gnu') + conf.xcheck_host_var('POUET') + ... + + +- Then:: + + CHOST=arm-hardfloat-linux-gnueabi waf configure + env arm-hardfloat-linux-gnueabi-CC="clang -..." waf configure + CFLAGS=... CHOST=arm-hardfloat-linux-gnueabi HOST_CFLAGS=-g waf configure + HOST_CC="clang -..." waf configure + +This example ``wscript`` compiles to Microchip PIC (xc16-gcc-xyz must be in PATH): + +.. code:: python + + from waflib import Configure + + #from https://gist.github.com/rpuntaie/2bddfb5d7b77db26415ee14371289971 + import waf_variants + + variants='pc fw/variant1 fw/variant2'.split() + + top = "." + out = "../build" + + PIC = '33FJ128GP804' #dsPICxxx + + @Configure.conf + def gcc_modifier_xc16(cfg): + v = cfg.env + v.cprogram_PATTERN = '%s.elf' + v.LINKFLAGS_cprogram = ','.join(['-Wl','','','--defsym=__MPLAB_BUILD=0','','--script=p'+PIC+'.gld', + '--stack=16','--check-sections','--data-init','--pack-data','--handles','--isr','--no-gc-sections', + '--fill-upper=0','--stackguard=16','--no-force-link','--smart-io']) #,'--report-mem']) + v.CFLAGS_cprogram=['-mcpu='+PIC,'-omf=elf','-mlarge-code','-msmart-io=1', + '-msfr-warn=off','-mno-override-inline','-finline','-Winline'] + + def configure(cfg): + if 'fw' in cfg.variant: #firmware + cfg.env.DEST_OS = 'xc16' #cfg.env.CHOST = 'xc16' #works too + cfg.load('c cross_gnu') #cfg.env.CHOST becomes ['xc16'] + ... + else: #configure for pc SW + ... + + def build(bld): + if 'fw' in bld.variant: #firmware + bld.program(source='maintst.c', target='maintst'); + bld(source='maintst.elf', target='maintst.hex', rule="xc16-bin2hex ${SRC} -a -omf=elf") + else: #build for pc SW + ... 
+ +""" + +import os +from waflib import Utils, Configure +from waflib.Tools import ccroot, gcc + +try: + from shlex import quote +except ImportError: + from pipes import quote + +def get_chost_stuff(conf): + """ + Get the CHOST environment variable contents + """ + chost = None + chost_envar = None + if conf.env.CHOST: + chost = conf.env.CHOST[0] + chost_envar = chost.replace('-', '_') + return chost, chost_envar + + +@Configure.conf +def xcheck_var(conf, name, wafname=None, cross=False): + wafname = wafname or name + + if wafname in conf.env: + value = conf.env[wafname] + if isinstance(value, str): + value = [value] + else: + envar = os.environ.get(name) + if not envar: + return + value = Utils.to_list(envar) if envar != '' else [envar] + + conf.env[wafname] = value + if cross: + pretty = 'cross-compilation %s' % wafname + else: + pretty = wafname + conf.msg('Will use %s' % pretty, " ".join(quote(x) for x in value)) + +@Configure.conf +def xcheck_host_prog(conf, name, tool, wafname=None): + wafname = wafname or name + + chost, chost_envar = get_chost_stuff(conf) + + specific = None + if chost: + specific = os.environ.get('%s_%s' % (chost_envar, name)) + + if specific: + value = Utils.to_list(specific) + conf.env[wafname] += value + conf.msg('Will use cross-compilation %s from %s_%s' % (name, chost_envar, name), + " ".join(quote(x) for x in value)) + return + else: + envar = os.environ.get('HOST_%s' % name) + if envar is not None: + value = Utils.to_list(envar) + conf.env[wafname] = value + conf.msg('Will use cross-compilation %s from HOST_%s' % (name, name), + " ".join(quote(x) for x in value)) + return + + if conf.env[wafname]: + return + + value = None + if chost: + value = '%s-%s' % (chost, tool) + + if value: + conf.env[wafname] = value + conf.msg('Will use cross-compilation %s from CHOST' % wafname, value) + +@Configure.conf +def xcheck_host_envar(conf, name, wafname=None): + wafname = wafname or name + + chost, chost_envar = get_chost_stuff(conf) + + specific = None + if chost: + specific = os.environ.get('%s_%s' % (chost_envar, name)) + + if specific: + value = Utils.to_list(specific) + conf.env[wafname] += value + conf.msg('Will use cross-compilation %s from %s_%s' \ + % (name, chost_envar, name), + " ".join(quote(x) for x in value)) + return + + + envar = os.environ.get('HOST_%s' % name) + if envar is None: + return + + value = Utils.to_list(envar) if envar != '' else [envar] + + conf.env[wafname] = value + conf.msg('Will use cross-compilation %s from HOST_%s' % (name, name), + " ".join(quote(x) for x in value)) + + +@Configure.conf +def xcheck_host(conf): + conf.xcheck_var('CHOST', cross=True) + conf.env.CHOST = conf.env.CHOST or [conf.env.DEST_OS] + conf.env.DEST_OS = conf.env.CHOST[0].replace('-','_') + conf.xcheck_host_prog('CC', 'gcc') + conf.xcheck_host_prog('CXX', 'g++') + conf.xcheck_host_prog('LINK_CC', 'gcc') + conf.xcheck_host_prog('LINK_CXX', 'g++') + conf.xcheck_host_prog('AR', 'ar') + conf.xcheck_host_prog('AS', 'as') + conf.xcheck_host_prog('LD', 'ld') + conf.xcheck_host_envar('CFLAGS') + conf.xcheck_host_envar('CXXFLAGS') + conf.xcheck_host_envar('LDFLAGS', 'LINKFLAGS') + conf.xcheck_host_envar('LIB') + conf.xcheck_host_envar('PKG_CONFIG_LIBDIR') + conf.xcheck_host_envar('PKG_CONFIG_PATH') + + if not conf.env.env: + conf.env.env = {} + conf.env.env.update(os.environ) + if conf.env.PKG_CONFIG_LIBDIR: + conf.env.env['PKG_CONFIG_LIBDIR'] = conf.env.PKG_CONFIG_LIBDIR[0] + if conf.env.PKG_CONFIG_PATH: + conf.env.env['PKG_CONFIG_PATH'] = conf.env.PKG_CONFIG_PATH[0] + 
+def configure(conf): + """ + Configuration example for gcc, it will not work for g++/clang/clang++ + """ + conf.xcheck_host() + conf.gcc_common_flags() + conf.gcc_modifier_platform() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() diff --git a/third_party/waf/waflib/extras/cython.py b/third_party/waf/waflib/extras/cython.py new file mode 100644 index 0000000..591c274 --- /dev/null +++ b/third_party/waf/waflib/extras/cython.py @@ -0,0 +1,147 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010-2015 + +import re +from waflib import Task, Logs +from waflib.TaskGen import extension + +cy_api_pat = re.compile(r'\s*?cdef\s*?(public|api)\w*') +re_cyt = re.compile(r""" + ^\s* # must begin with some whitespace characters + (?:from\s+(\w+)(?:\.\w+)*\s+)? # optionally match "from foo(.baz)" and capture foo + c?import\s(\w+|[*]) # require "import bar" and capture bar + """, re.M | re.VERBOSE) + +@extension('.pyx') +def add_cython_file(self, node): + """ + Process a *.pyx* file given in the list of source files. No additional + feature is required:: + + def build(bld): + bld(features='c cshlib pyext', source='main.c foo.pyx', target='app') + """ + ext = '.c' + if 'cxx' in self.features: + self.env.append_unique('CYTHONFLAGS', '--cplus') + ext = '.cc' + + for x in getattr(self, 'cython_includes', []): + # TODO re-use these nodes in "scan" below + d = self.path.find_dir(x) + if d: + self.env.append_unique('CYTHONFLAGS', '-I%s' % d.abspath()) + + tsk = self.create_task('cython', node, node.change_ext(ext)) + self.source += tsk.outputs + +class cython(Task.Task): + run_str = '${CYTHON} ${CYTHONFLAGS} -o ${TGT[0].abspath()} ${SRC}' + color = 'GREEN' + + vars = ['INCLUDES'] + """ + Rebuild whenever the INCLUDES change. The variables such as CYTHONFLAGS will be appended + by the metaclass. + """ + + ext_out = ['.h'] + """ + The creation of a .h file is known only after the build has begun, so it is not + possible to compute a build order just by looking at the task inputs/outputs. + """ + + def runnable_status(self): + """ + Perform a double-check to add the headers created by cython + to the output nodes. The scanner is executed only when the cython task + must be executed (optimization). + """ + ret = super(cython, self).runnable_status() + if ret == Task.ASK_LATER: + return ret + for x in self.generator.bld.raw_deps[self.uid()]: + if x.startswith('header:'): + self.outputs.append(self.inputs[0].parent.find_or_declare(x.replace('header:', ''))) + return super(cython, self).runnable_status() + + def post_run(self): + for x in self.outputs: + if x.name.endswith('.h'): + if not x.exists(): + if Logs.verbose: + Logs.warn('Expected %r', x.abspath()) + x.write('') + return Task.Task.post_run(self) + + def scan(self): + """ + Return the dependent files (.pxd) by looking in the include folders. + Put the headers to generate in the custom list "bld.raw_deps". 
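+
+		As a purely illustrative example, both of the following cython lines
+		would make this scanner record a dependency on a module "foo"
+		(i.e. search the include folders for a "foo.pxd"):
+
+			from foo cimport bar
+			cimport foo
+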
To inspect the scanner results use::
+
+			$ waf clean build --zones=deps
+		"""
+		node = self.inputs[0]
+		txt = node.read()
+
+		mods = set()
+		for m in re_cyt.finditer(txt):
+			if m.group(1): # matches "from foo import bar"
+				mods.add(m.group(1))
+			else:
+				mods.add(m.group(2))
+
+		Logs.debug('cython: mods %r', mods)
+		incs = getattr(self.generator, 'cython_includes', [])
+		incs = [self.generator.path.find_dir(x) for x in incs]
+		incs.append(node.parent)
+
+		found = []
+		missing = []
+		for x in sorted(mods):
+			for y in incs:
+				k = y.find_resource(x + '.pxd')
+				if k:
+					found.append(k)
+					break
+			else:
+				missing.append(x)
+
+		# the cython file implicitly depends on a pxd file that might be present
+		implicit = node.parent.find_resource(node.name[:-3] + 'pxd')
+		if implicit:
+			found.append(implicit)
+
+		Logs.debug('cython: found %r', found)
+
+		# Now the .h files to be created - store them in bld.raw_deps for later use
+		has_api = False
+		has_public = False
+		for l in txt.splitlines():
+			if cy_api_pat.match(l):
+				if ' api ' in l:
+					has_api = True
+				if ' public ' in l:
+					has_public = True
+		name = node.name.replace('.pyx', '')
+		if has_api:
+			missing.append('header:%s_api.h' % name)
+		if has_public:
+			missing.append('header:%s.h' % name)
+
+		return (found, missing)
+
+def options(ctx):
+	ctx.add_option('--cython-flags', action='store', default='', help='space separated list of flags to pass to cython')
+
+def configure(ctx):
+	if not ctx.env.CC and not ctx.env.CXX:
+		ctx.fatal('Load a C/C++ compiler first')
+	if not ctx.env.PYTHON:
+		ctx.fatal('Load the python tool first!')
+	ctx.find_program('cython', var='CYTHON')
+	if hasattr(ctx.options, 'cython_flags'):
+		ctx.env.CYTHONFLAGS = ctx.options.cython_flags
+
diff --git a/third_party/waf/waflib/extras/dcc.py b/third_party/waf/waflib/extras/dcc.py
new file mode 100644
index 0000000..c1a57c0
--- /dev/null
+++ b/third_party/waf/waflib/extras/dcc.py
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Jérôme Carretero, 2011 (zougloub)
+
+from waflib import Options
+from waflib.Tools import ccroot
+from waflib.Configure import conf
+
+@conf
+def find_dcc(conf):
+	conf.find_program(['dcc'], var='CC', path_list=getattr(Options.options, 'diabbindir', ""))
+	conf.env.CC_NAME = 'dcc'
+
+@conf
+def find_dld(conf):
+	conf.find_program(['dld'], var='LINK_CC', path_list=getattr(Options.options, 'diabbindir', ""))
+	conf.env.LINK_CC_NAME = 'dld'
+
+@conf
+def find_dar(conf):
+	conf.find_program(['dar'], var='AR', path_list=getattr(Options.options, 'diabbindir', ""))
+	conf.env.AR_NAME = 'dar'
+	conf.env.ARFLAGS = 'rcs'
+
+@conf
+def find_ddump(conf):
+	conf.find_program(['ddump'], var='DDUMP', path_list=getattr(Options.options, 'diabbindir', ""))
+
+@conf
+def dcc_common_flags(conf):
+	v = conf.env
+	v['CC_SRC_F'] = []
+	v['CC_TGT_F'] = ['-c', '-o']
+
+	# linker
+	if not v['LINK_CC']:
+		v['LINK_CC'] = v['CC']
+	v['CCLNK_SRC_F'] = []
+	v['CCLNK_TGT_F'] = ['-o']
+	v['CPPPATH_ST'] = '-I%s'
+	v['DEFINES_ST'] = '-D%s'
+
+	v['LIB_ST'] = '-l:%s' # template for adding libs
+	v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
+	v['STLIB_ST'] = '-l:%s'
+	v['STLIBPATH_ST'] = '-L%s'
+	v['RPATH_ST'] = '-Wl,-rpath,%s'
+	#v['STLIB_MARKER'] = '-Wl,-Bstatic'
+
+	# program
+	v['cprogram_PATTERN'] = '%s.elf'
+
+	# static lib
+	v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic']
+	v['cstlib_PATTERN'] = 'lib%s.a'
+
+def configure(conf):
+	conf.find_dcc()
+	conf.find_dar()
+	conf.find_dld()
+	conf.find_ddump()
+	conf.dcc_common_flags()
+	conf.cc_load_tools()
+	conf.cc_add_flags()
+	
conf.link_add_flags() + +def options(opt): + """ + Add the ``--with-diab-bindir`` command-line options. + """ + opt.add_option('--with-diab-bindir', type='string', dest='diabbindir', help = 'Specify alternate diab bin folder', default="") + diff --git a/third_party/waf/waflib/extras/distnet.py b/third_party/waf/waflib/extras/distnet.py new file mode 100644 index 0000000..8084b15 --- /dev/null +++ b/third_party/waf/waflib/extras/distnet.py @@ -0,0 +1,432 @@ +#! /usr/bin/env python +# encoding: utf-8 + +""" +waf-powered distributed network builds, with a network cache. + +Caching files from a server has advantages over a NFS/Samba shared folder: + +- builds are much faster because they use local files +- builds just continue to work in case of a network glitch +- permissions are much simpler to manage +""" + +import os, urllib, tarfile, re, shutil, tempfile, sys +from collections import OrderedDict +from waflib import Context, Utils, Logs + +try: + from urllib.parse import urlencode +except ImportError: + urlencode = urllib.urlencode + +def safe_urlencode(data): + x = urlencode(data) + try: + x = x.encode('utf-8') + except Exception: + pass + return x + +try: + from urllib.error import URLError +except ImportError: + from urllib2 import URLError + +try: + from urllib.request import Request, urlopen +except ImportError: + from urllib2 import Request, urlopen + +DISTNETCACHE = os.environ.get('DISTNETCACHE', '/tmp/distnetcache') +DISTNETSERVER = os.environ.get('DISTNETSERVER', 'http://localhost:8000/cgi-bin/') +TARFORMAT = 'w:bz2' +TIMEOUT = 60 +REQUIRES = 'requires.txt' + +re_com = re.compile(r'\s*#.*', re.M) + +def total_version_order(num): + lst = num.split('.') + template = '%10s' * len(lst) + ret = template % tuple(lst) + return ret + +def get_distnet_cache(): + return getattr(Context.g_module, 'DISTNETCACHE', DISTNETCACHE) + +def get_server_url(): + return getattr(Context.g_module, 'DISTNETSERVER', DISTNETSERVER) + +def get_download_url(): + return '%s/download.py' % get_server_url() + +def get_upload_url(): + return '%s/upload.py' % get_server_url() + +def get_resolve_url(): + return '%s/resolve.py' % get_server_url() + +def send_package_name(): + out = getattr(Context.g_module, 'out', 'build') + pkgfile = '%s/package_to_upload.tarfile' % out + return pkgfile + +class package(Context.Context): + fun = 'package' + cmd = 'package' + + def execute(self): + try: + files = self.files + except AttributeError: + files = self.files = [] + + Context.Context.execute(self) + pkgfile = send_package_name() + if not pkgfile in files: + if not REQUIRES in files: + files.append(REQUIRES) + self.make_tarfile(pkgfile, files, add_to_package=False) + + def make_tarfile(self, filename, files, **kw): + if kw.get('add_to_package', True): + self.files.append(filename) + + with tarfile.open(filename, TARFORMAT) as tar: + endname = os.path.split(filename)[-1] + endname = endname.split('.')[0] + '/' + for x in files: + tarinfo = tar.gettarinfo(x, x) + tarinfo.uid = tarinfo.gid = 0 + tarinfo.uname = tarinfo.gname = 'root' + tarinfo.size = os.stat(x).st_size + if os.environ.get('SOURCE_DATE_EPOCH'): + tarinfo.mtime = int(os.environ.get('SOURCE_DATE_EPOCH')) + + # TODO - more archive creation options? + if kw.get('bare', True): + tarinfo.name = os.path.split(x)[1] + else: + tarinfo.name = endname + x # todo, if tuple, then.. 
+ Logs.debug('distnet: adding %r to %s', tarinfo.name, filename) + with open(x, 'rb') as f: + tar.addfile(tarinfo, f) + Logs.info('Created %s', filename) + +class publish(Context.Context): + fun = 'publish' + cmd = 'publish' + def execute(self): + if hasattr(Context.g_module, 'publish'): + Context.Context.execute(self) + mod = Context.g_module + + rfile = getattr(self, 'rfile', send_package_name()) + if not os.path.isfile(rfile): + self.fatal('Create the release file with "waf release" first! %r' % rfile) + + fdata = Utils.readf(rfile, m='rb') + data = safe_urlencode([('pkgdata', fdata), ('pkgname', mod.APPNAME), ('pkgver', mod.VERSION)]) + + req = Request(get_upload_url(), data) + response = urlopen(req, timeout=TIMEOUT) + data = response.read().strip() + + if sys.hexversion>0x300000f: + data = data.decode('utf-8') + + if data != 'ok': + self.fatal('Could not publish the package %r' % data) + +class constraint(object): + def __init__(self, line=''): + self.required_line = line + self.info = [] + + line = line.strip() + if not line: + return + + lst = line.split(',') + if lst: + self.pkgname = lst[0] + self.required_version = lst[1] + for k in lst: + a, b, c = k.partition('=') + if a and c: + self.info.append((a, c)) + def __str__(self): + buf = [] + buf.append(self.pkgname) + buf.append(self.required_version) + for k in self.info: + buf.append('%s=%s' % k) + return ','.join(buf) + + def __repr__(self): + return "requires %s-%s" % (self.pkgname, self.required_version) + + def human_display(self, pkgname, pkgver): + return '%s-%s requires %s-%s' % (pkgname, pkgver, self.pkgname, self.required_version) + + def why(self): + ret = [] + for x in self.info: + if x[0] == 'reason': + ret.append(x[1]) + return ret + + def add_reason(self, reason): + self.info.append(('reason', reason)) + +def parse_constraints(text): + assert(text is not None) + constraints = [] + text = re.sub(re_com, '', text) + lines = text.splitlines() + for line in lines: + line = line.strip() + if not line: + continue + constraints.append(constraint(line)) + return constraints + +def list_package_versions(cachedir, pkgname): + pkgdir = os.path.join(cachedir, pkgname) + try: + versions = os.listdir(pkgdir) + except OSError: + return [] + versions.sort(key=total_version_order) + versions.reverse() + return versions + +class package_reader(Context.Context): + cmd = 'solver' + fun = 'solver' + + def __init__(self, **kw): + Context.Context.__init__(self, **kw) + + self.myproject = getattr(Context.g_module, 'APPNAME', 'project') + self.myversion = getattr(Context.g_module, 'VERSION', '1.0') + self.cache_constraints = {} + self.constraints = [] + + def compute_dependencies(self, filename=REQUIRES): + text = Utils.readf(filename) + data = safe_urlencode([('text', text)]) + + if '--offline' in sys.argv: + self.constraints = self.local_resolve(text) + else: + req = Request(get_resolve_url(), data) + try: + response = urlopen(req, timeout=TIMEOUT) + except URLError as e: + Logs.warn('The package server is down! 
%r', e) + self.constraints = self.local_resolve(text) + else: + ret = response.read() + try: + ret = ret.decode('utf-8') + except Exception: + pass + self.trace(ret) + self.constraints = parse_constraints(ret) + self.check_errors() + + def check_errors(self): + errors = False + for c in self.constraints: + if not c.required_version: + errors = True + + reasons = c.why() + if len(reasons) == 1: + Logs.error('%s but no matching package could be found in this repository', reasons[0]) + else: + Logs.error('Conflicts on package %r:', c.pkgname) + for r in reasons: + Logs.error(' %s', r) + if errors: + self.fatal('The package requirements cannot be satisfied!') + + def load_constraints(self, pkgname, pkgver, requires=REQUIRES): + try: + return self.cache_constraints[(pkgname, pkgver)] + except KeyError: + text = Utils.readf(os.path.join(get_distnet_cache(), pkgname, pkgver, requires)) + ret = parse_constraints(text) + self.cache_constraints[(pkgname, pkgver)] = ret + return ret + + def apply_constraint(self, domain, constraint): + vname = constraint.required_version.replace('*', '.*') + rev = re.compile(vname, re.M) + ret = [x for x in domain if rev.match(x)] + return ret + + def trace(self, *k): + if getattr(self, 'debug', None): + Logs.error(*k) + + def solve(self, packages_to_versions={}, packages_to_constraints={}, pkgname='', pkgver='', todo=[], done=[]): + # breadth first search + n_packages_to_versions = dict(packages_to_versions) + n_packages_to_constraints = dict(packages_to_constraints) + + self.trace("calling solve with %r %r %r" % (packages_to_versions, todo, done)) + done = done + [pkgname] + + constraints = self.load_constraints(pkgname, pkgver) + self.trace("constraints %r" % constraints) + + for k in constraints: + try: + domain = n_packages_to_versions[k.pkgname] + except KeyError: + domain = list_package_versions(get_distnet_cache(), k.pkgname) + + + self.trace("constraints?") + if not k.pkgname in done: + todo = todo + [k.pkgname] + + self.trace("domain before %s -> %s, %r" % (pkgname, k.pkgname, domain)) + + # apply the constraint + domain = self.apply_constraint(domain, k) + + self.trace("domain after %s -> %s, %r" % (pkgname, k.pkgname, domain)) + + n_packages_to_versions[k.pkgname] = domain + + # then store the constraint applied + constraints = list(packages_to_constraints.get(k.pkgname, [])) + constraints.append((pkgname, pkgver, k)) + n_packages_to_constraints[k.pkgname] = constraints + + if not domain: + self.trace("no domain while processing constraint %r from %r %r" % (domain, pkgname, pkgver)) + return (n_packages_to_versions, n_packages_to_constraints) + + # next package on the todo list + if not todo: + return (n_packages_to_versions, n_packages_to_constraints) + + n_pkgname = todo[0] + n_pkgver = n_packages_to_versions[n_pkgname][0] + tmp = dict(n_packages_to_versions) + tmp[n_pkgname] = [n_pkgver] + + self.trace("fixed point %s" % n_pkgname) + + return self.solve(tmp, n_packages_to_constraints, n_pkgname, n_pkgver, todo[1:], done) + + def get_results(self): + return '\n'.join([str(c) for c in self.constraints]) + + def solution_to_constraints(self, versions, constraints): + solution = [] + for p in versions: + c = constraint() + solution.append(c) + + c.pkgname = p + if versions[p]: + c.required_version = versions[p][0] + else: + c.required_version = '' + for (from_pkgname, from_pkgver, c2) in constraints.get(p, ''): + c.add_reason(c2.human_display(from_pkgname, from_pkgver)) + return solution + + def local_resolve(self, text): + 
self.cache_constraints[(self.myproject, self.myversion)] = parse_constraints(text) + p2v = OrderedDict({self.myproject: [self.myversion]}) + (versions, constraints) = self.solve(p2v, {}, self.myproject, self.myversion, []) + return self.solution_to_constraints(versions, constraints) + + def download_to_file(self, pkgname, pkgver, subdir, tmp): + data = safe_urlencode([('pkgname', pkgname), ('pkgver', pkgver), ('pkgfile', subdir)]) + req = urlopen(get_download_url(), data, timeout=TIMEOUT) + with open(tmp, 'wb') as f: + while True: + buf = req.read(8192) + if not buf: + break + f.write(buf) + + def extract_tar(self, subdir, pkgdir, tmpfile): + with tarfile.open(tmpfile) as f: + temp = tempfile.mkdtemp(dir=pkgdir) + try: + f.extractall(temp) + os.rename(temp, os.path.join(pkgdir, subdir)) + finally: + try: + shutil.rmtree(temp) + except Exception: + pass + + def get_pkg_dir(self, pkgname, pkgver, subdir): + pkgdir = os.path.join(get_distnet_cache(), pkgname, pkgver) + if not os.path.isdir(pkgdir): + os.makedirs(pkgdir) + + target = os.path.join(pkgdir, subdir) + + if os.path.exists(target): + return target + + (fd, tmp) = tempfile.mkstemp(dir=pkgdir) + try: + os.close(fd) + self.download_to_file(pkgname, pkgver, subdir, tmp) + if subdir == REQUIRES: + os.rename(tmp, target) + else: + self.extract_tar(subdir, pkgdir, tmp) + finally: + try: + os.remove(tmp) + except OSError: + pass + + return target + + def __iter__(self): + if not self.constraints: + self.compute_dependencies() + for x in self.constraints: + if x.pkgname == self.myproject: + continue + yield x + + def execute(self): + self.compute_dependencies() + +packages = package_reader() + +def load_tools(ctx, extra): + global packages + for c in packages: + packages.get_pkg_dir(c.pkgname, c.required_version, extra) + noarchdir = packages.get_pkg_dir(c.pkgname, c.required_version, 'noarch') + for x in os.listdir(noarchdir): + if x.startswith('waf_') and x.endswith('.py'): + ctx.load([x.rstrip('.py')], tooldir=[noarchdir]) + +def options(opt): + opt.add_option('--offline', action='store_true') + packages.execute() + load_tools(opt, REQUIRES) + +def configure(conf): + load_tools(conf, conf.variant) + +def build(bld): + load_tools(bld, bld.variant) + diff --git a/third_party/waf/waflib/extras/doxygen.py b/third_party/waf/waflib/extras/doxygen.py new file mode 100644 index 0000000..0fda703 --- /dev/null +++ b/third_party/waf/waflib/extras/doxygen.py @@ -0,0 +1,236 @@ +#! /usr/bin/env python +# encoding: UTF-8 +# Thomas Nagy 2008-2010 (ita) + +""" + +Doxygen support + +Variables passed to bld(): +* doxyfile -- the Doxyfile to use +* doxy_tar -- destination archive for generated documentation (if desired) +* install_path -- where to install the documentation +* pars -- dictionary overriding doxygen configuration settings + +When using this tool, the wscript will look like: + + def options(opt): + opt.load('doxygen') + + def configure(conf): + conf.load('doxygen') + # check conf.env.DOXYGEN, if it is mandatory + + def build(bld): + if bld.env.DOXYGEN: + bld(features="doxygen", doxyfile='Doxyfile', ...) +""" + +import os, os.path, re +from collections import OrderedDict +from waflib import Task, Utils, Node +from waflib.TaskGen import feature + +DOXY_STR = '"${DOXYGEN}" - ' +DOXY_FMTS = 'html latex man rft xml'.split() +DOXY_FILE_PATTERNS = '*.' 
+ ' *.'.join('''
+c cc cxx cpp c++ java ii ixx ipp i++ inl h hh hxx hpp h++ idl odl cs php php3
+inc m mm py f90
+'''.split())
+
+re_rl = re.compile('\\\\\r*\n', re.MULTILINE)
+re_nl = re.compile('\r*\n', re.M)
+def parse_doxy(txt):
+	'''
+	Parses a doxygen file.
+	Returns an ordered dictionary. We cannot return a default dictionary, as the
+	order in which the entries are reported does matter, especially for the
+	'@INCLUDE' lines.
+	'''
+	tbl = OrderedDict()
+	txt = re_rl.sub('', txt)
+	lines = re_nl.split(txt)
+	for x in lines:
+		x = x.strip()
+		if not x or x.startswith('#') or x.find('=') < 0:
+			continue
+		if x.find('+=') >= 0:
+			tmp = x.split('+=')
+			key = tmp[0].strip()
+			if key in tbl:
+				tbl[key] += ' ' + '+='.join(tmp[1:]).strip()
+			else:
+				tbl[key] = '+='.join(tmp[1:]).strip()
+		else:
+			tmp = x.split('=')
+			tbl[tmp[0].strip()] = '='.join(tmp[1:]).strip()
+	return tbl

+class doxygen(Task.Task):
+	vars = ['DOXYGEN', 'DOXYFLAGS']
+	color = 'BLUE'
+	ext_in = [ '.py', '.c', '.h', '.java', '.pb.cc' ]
+
+	def runnable_status(self):
+		'''
+		self.pars are populated in runnable_status - because this function is being
+		run *before* both self.pars "consumers" - scan() and run()
+
+		set output_dir (node) for the output
+		'''
+
+		for x in self.run_after:
+			if not x.hasrun:
+				return Task.ASK_LATER
+
+		if not getattr(self, 'pars', None):
+			txt = self.inputs[0].read()
+			self.pars = parse_doxy(txt)
+
+			# Override with any parameters passed to the task generator
+			if getattr(self.generator, 'pars', None):
+				for k, v in self.generator.pars.items():
+					self.pars[k] = v
+
+			if self.pars.get('OUTPUT_DIRECTORY'):
+				# Use the path parsed from the Doxyfile as an absolute path
+				output_node = self.inputs[0].parent.get_bld().make_node(self.pars['OUTPUT_DIRECTORY'])
+			else:
+				# If no OUTPUT_DIRECTORY was specified in the Doxyfile, build the path from the Doxyfile name + '.doxy'
+				output_node = self.inputs[0].parent.get_bld().make_node(self.inputs[0].name + '.doxy')
+			output_node.mkdir()
+			self.pars['OUTPUT_DIRECTORY'] = output_node.abspath()
+
+			self.doxy_inputs = getattr(self, 'doxy_inputs', [])
+			if not self.pars.get('INPUT'):
+				self.doxy_inputs.append(self.inputs[0].parent)
+			else:
+				for i in self.pars.get('INPUT').split():
+					if os.path.isabs(i):
+						node = self.generator.bld.root.find_node(i)
+					else:
+						node = self.inputs[0].parent.find_node(i)
+					if not node:
+						self.generator.bld.fatal('Could not find the doxygen input %r' % i)
+					self.doxy_inputs.append(node)
+
+		if not getattr(self, 'output_dir', None):
+			bld = self.generator.bld
+			# Output path is always an absolute path as it was transformed above.
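+			# (find_dir() returns a waf Node; it succeeds here because
+			# output_node.mkdir() above guaranteed that the directory exists,
+			# and the node is reused by post_run()/add_install() via ant_glob().)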
+ self.output_dir = bld.root.find_dir(self.pars['OUTPUT_DIRECTORY']) + + self.signature() + ret = Task.Task.runnable_status(self) + if ret == Task.SKIP_ME: + # in case the files were removed + self.add_install() + return ret + + def scan(self): + exclude_patterns = self.pars.get('EXCLUDE_PATTERNS','').split() + exclude_patterns = [pattern.replace('*/', '**/') for pattern in exclude_patterns] + file_patterns = self.pars.get('FILE_PATTERNS','').split() + if not file_patterns: + file_patterns = DOXY_FILE_PATTERNS.split() + if self.pars.get('RECURSIVE') == 'YES': + file_patterns = ["**/%s" % pattern for pattern in file_patterns] + nodes = [] + names = [] + for node in self.doxy_inputs: + if os.path.isdir(node.abspath()): + for m in node.ant_glob(incl=file_patterns, excl=exclude_patterns): + nodes.append(m) + else: + nodes.append(node) + return (nodes, names) + + def run(self): + dct = self.pars.copy() + code = '\n'.join(['%s = %s' % (x, dct[x]) for x in self.pars]) + code = code.encode() # for python 3 + #fmt = DOXY_STR % (self.inputs[0].parent.abspath()) + cmd = Utils.subst_vars(DOXY_STR, self.env) + env = self.env.env or None + proc = Utils.subprocess.Popen(cmd, shell=True, stdin=Utils.subprocess.PIPE, env=env, cwd=self.inputs[0].parent.abspath()) + proc.communicate(code) + return proc.returncode + + def post_run(self): + nodes = self.output_dir.ant_glob('**/*', quiet=True) + for x in nodes: + self.generator.bld.node_sigs[x] = self.uid() + self.add_install() + return Task.Task.post_run(self) + + def add_install(self): + nodes = self.output_dir.ant_glob('**/*', quiet=True) + self.outputs += nodes + if getattr(self.generator, 'install_path', None): + if not getattr(self.generator, 'doxy_tar', None): + self.generator.add_install_files(install_to=self.generator.install_path, + install_from=self.outputs, + postpone=False, + cwd=self.output_dir, + relative_trick=True) + +class tar(Task.Task): + "quick tar creation" + run_str = '${TAR} ${TAROPTS} ${TGT} ${SRC}' + color = 'RED' + after = ['doxygen'] + def runnable_status(self): + for x in getattr(self, 'input_tasks', []): + if not x.hasrun: + return Task.ASK_LATER + + if not getattr(self, 'tar_done_adding', None): + # execute this only once + self.tar_done_adding = True + for x in getattr(self, 'input_tasks', []): + self.set_inputs(x.outputs) + if not self.inputs: + return Task.SKIP_ME + return Task.Task.runnable_status(self) + + def __str__(self): + tgt_str = ' '.join([a.path_from(a.ctx.launch_node()) for a in self.outputs]) + return '%s: %s\n' % (self.__class__.__name__, tgt_str) + +@feature('doxygen') +def process_doxy(self): + if not getattr(self, 'doxyfile', None): + self.bld.fatal('no doxyfile variable specified??') + + node = self.doxyfile + if not isinstance(node, Node.Node): + node = self.path.find_resource(node) + if not node: + self.bld.fatal('doxygen file %s not found' % self.doxyfile) + + # the task instance + dsk = self.create_task('doxygen', node, always_run=getattr(self, 'always', False)) + + if getattr(self, 'doxy_tar', None): + tsk = self.create_task('tar', always_run=getattr(self, 'always', False)) + tsk.input_tasks = [dsk] + tsk.set_outputs(self.path.find_or_declare(self.doxy_tar)) + if self.doxy_tar.endswith('bz2'): + tsk.env['TAROPTS'] = ['cjf'] + elif self.doxy_tar.endswith('gz'): + tsk.env['TAROPTS'] = ['czf'] + else: + tsk.env['TAROPTS'] = ['cf'] + if getattr(self, 'install_path', None): + self.add_install_files(install_to=self.install_path, install_from=tsk.outputs) + +def configure(conf): + ''' + Check if doxygen and tar 
commands are present in the system + + If the commands are present, then conf.env.DOXYGEN and conf.env.TAR + variables will be set. Detection can be controlled by setting DOXYGEN and + TAR environment variables. + ''' + + conf.find_program('doxygen', var='DOXYGEN', mandatory=False) + conf.find_program('tar', var='TAR', mandatory=False) diff --git a/third_party/waf/waflib/extras/dpapi.py b/third_party/waf/waflib/extras/dpapi.py new file mode 100644 index 0000000..b94d482 --- /dev/null +++ b/third_party/waf/waflib/extras/dpapi.py @@ -0,0 +1,87 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Matt Clarkson, 2012 + +''' +DPAPI access library (http://msdn.microsoft.com/en-us/library/ms995355.aspx) +This file uses code originally created by Crusher Joe: +http://article.gmane.org/gmane.comp.python.ctypes/420 +And modified by Wayne Koorts: +http://stackoverflow.com/questions/463832/using-dpapi-with-python +''' + +from ctypes import windll, byref, cdll, Structure, POINTER, c_char, c_buffer +from ctypes.wintypes import DWORD +from waflib.Configure import conf + +LocalFree = windll.kernel32.LocalFree +memcpy = cdll.msvcrt.memcpy +CryptProtectData = windll.crypt32.CryptProtectData +CryptUnprotectData = windll.crypt32.CryptUnprotectData +CRYPTPROTECT_UI_FORBIDDEN = 0x01 +try: + extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd'.encode('ascii') +except AttributeError: + extra_entropy = 'cl;ad13 \0al;323kjd #(adl;k$#ajsd' + +class DATA_BLOB(Structure): + _fields_ = [ + ('cbData', DWORD), + ('pbData', POINTER(c_char)) + ] + +def get_data(blob_out): + cbData = int(blob_out.cbData) + pbData = blob_out.pbData + buffer = c_buffer(cbData) + memcpy(buffer, pbData, cbData) + LocalFree(pbData) + return buffer.raw + +@conf +def dpapi_encrypt_data(self, input_bytes, entropy = extra_entropy): + ''' + Encrypts data and returns byte string + + :param input_bytes: The data to be encrypted + :type input_bytes: String or Bytes + :param entropy: Extra entropy to add to the encryption process (optional) + :type entropy: String or Bytes + ''' + if not isinstance(input_bytes, bytes) or not isinstance(entropy, bytes): + self.fatal('The inputs to dpapi must be bytes') + buffer_in = c_buffer(input_bytes, len(input_bytes)) + buffer_entropy = c_buffer(entropy, len(entropy)) + blob_in = DATA_BLOB(len(input_bytes), buffer_in) + blob_entropy = DATA_BLOB(len(entropy), buffer_entropy) + blob_out = DATA_BLOB() + + if CryptProtectData(byref(blob_in), 'python_data', byref(blob_entropy), + None, None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)): + return get_data(blob_out) + else: + self.fatal('Failed to encrypt data') + +@conf +def dpapi_decrypt_data(self, encrypted_bytes, entropy = extra_entropy): + ''' + Decrypts data and returns byte string + + :param encrypted_bytes: The encrypted data + :type encrypted_bytes: Bytes + :param entropy: The extra entropy that was used during encryption (optional) + :type entropy: String or Bytes + ''' + if not isinstance(encrypted_bytes, bytes) or not isinstance(entropy, bytes): + self.fatal('The inputs to dpapi must be bytes') + buffer_in = c_buffer(encrypted_bytes, len(encrypted_bytes)) + buffer_entropy = c_buffer(entropy, len(entropy)) + blob_in = DATA_BLOB(len(encrypted_bytes), buffer_in) + blob_entropy = DATA_BLOB(len(entropy), buffer_entropy) + blob_out = DATA_BLOB() + if CryptUnprotectData(byref(blob_in), None, byref(blob_entropy), None, + None, CRYPTPROTECT_UI_FORBIDDEN, byref(blob_out)): + return get_data(blob_out) + else: + self.fatal('Failed to decrypt data') +
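Since both entry points are declared with @conf, they become methods on the configuration context once the tool is loaded; a minimal round-trip sketch (the payload is illustrative, the tool must be reachable on waf's tool path, and DPAPI only exists on Windows):

def configure(conf):
	conf.load('dpapi')
	secret = b'build-signing-password'       # hypothetical payload
	blob = conf.dpapi_encrypt_data(secret)   # encrypted byte string bound to the current user
	assert conf.dpapi_decrypt_data(blob) == secret

diff --git 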
a/third_party/waf/waflib/extras/eclipse.py b/third_party/waf/waflib/extras/eclipse.py new file mode 100644 index 0000000..49ca968 --- /dev/null +++ b/third_party/waf/waflib/extras/eclipse.py @@ -0,0 +1,501 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Eclipse CDT 5.0 generator for Waf +# Richard Quirk 2009-1011 (New BSD License) +# Thomas Nagy 2011 (ported to Waf 1.6) + +""" +Usage: + +def options(opt): + opt.load('eclipse') + +To add additional targets beside standard ones (configure, dist, install, check) +the environment ECLIPSE_EXTRA_TARGETS can be set (ie. to ['test', 'lint', 'docs']) + +$ waf configure eclipse +""" + +import sys, os +from waflib import Utils, Logs, Context, Build, TaskGen, Scripting, Errors, Node +from xml.dom.minidom import Document + +STANDARD_INCLUDES = [ '/usr/local/include', '/usr/include' ] + +oe_cdt = 'org.eclipse.cdt' +cdt_mk = oe_cdt + '.make.core' +cdt_core = oe_cdt + '.core' +cdt_bld = oe_cdt + '.build.core' +extbuilder_dir = '.externalToolBuilders' +extbuilder_name = 'Waf_Builder.launch' +settings_dir = '.settings' +settings_name = 'language.settings.xml' + +class eclipse(Build.BuildContext): + cmd = 'eclipse' + fun = Scripting.default_cmd + + def execute(self): + """ + Entry point + """ + self.restore() + if not self.all_envs: + self.load_envs() + self.recurse([self.run_dir]) + + appname = getattr(Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath())) + self.create_cproject(appname, pythonpath=self.env['ECLIPSE_PYTHON_PATH']) + + # Helper to dump the XML document content to XML with UTF-8 encoding + def write_conf_to_xml(self, filename, document): + self.srcnode.make_node(filename).write(document.toprettyxml(encoding='UTF-8'), flags='wb') + + def create_cproject(self, appname, workspace_includes=[], pythonpath=[]): + """ + Create the Eclipse CDT .project and .cproject files + @param appname The name that will appear in the Project Explorer + @param build The BuildContext object to extract includes from + @param workspace_includes Optional project includes to prevent + "Unresolved Inclusion" errors in the Eclipse editor + @param pythonpath Optional project specific python paths + """ + hasc = hasjava = haspython = False + source_dirs = [] + cpppath = self.env['CPPPATH'] + javasrcpath = [] + javalibpath = [] + includes = STANDARD_INCLUDES + if sys.platform != 'win32': + cc = self.env.CC or self.env.CXX + if cc: + cmd = cc + ['-xc++', '-E', '-Wp,-v', '-'] + try: + gccout = self.cmd_and_log(cmd, output=Context.STDERR, quiet=Context.BOTH, input='\n'.encode()).splitlines() + except Errors.WafError: + pass + else: + includes = [] + for ipath in gccout: + if ipath.startswith(' /'): + includes.append(ipath[1:]) + cpppath += includes + Logs.warn('Generating Eclipse CDT project files') + + for g in self.groups: + for tg in g: + if not isinstance(tg, TaskGen.task_gen): + continue + + tg.post() + + # Add local Python modules paths to configuration so object resolving will work in IDE + # This may also contain generated files (ie. pyqt5 or protoc) that get picked from build + if 'py' in tg.features: + pypath = tg.path.relpath() + py_installfrom = getattr(tg, 'install_from', None) + if isinstance(py_installfrom, Node.Node): + pypath = py_installfrom.path_from(self.root.make_node(self.top_dir)) + if pypath not in pythonpath: + pythonpath.append(pypath) + haspython = True + + # Add Java source directories so object resolving works in IDE + # This may also contain generated files (ie. 
protoc) that get picked from build + if 'javac' in tg.features: + java_src = tg.path.relpath() + java_srcdir = getattr(tg.javac_task, 'srcdir', None) + if java_srcdir: + if isinstance(java_srcdir, Node.Node): + java_srcdir = [java_srcdir] + for x in Utils.to_list(java_srcdir): + x = x.path_from(self.root.make_node(self.top_dir)) + if x not in javasrcpath: + javasrcpath.append(x) + else: + if java_src not in javasrcpath: + javasrcpath.append(java_src) + hasjava = True + + # Check if there are external dependencies and add them as external jar so they will be resolved by Eclipse + usedlibs=getattr(tg, 'use', []) + for x in Utils.to_list(usedlibs): + for cl in Utils.to_list(tg.env['CLASSPATH_'+x]): + if cl not in javalibpath: + javalibpath.append(cl) + + if not getattr(tg, 'link_task', None): + continue + + features = Utils.to_list(getattr(tg, 'features', '')) + + is_cc = 'c' in features or 'cxx' in features + + incnodes = tg.to_incnodes(tg.to_list(getattr(tg, 'includes', [])) + tg.env['INCLUDES']) + for p in incnodes: + path = p.path_from(self.srcnode) + + if (path.startswith("/")): + if path not in cpppath: + cpppath.append(path) + else: + if path not in workspace_includes: + workspace_includes.append(path) + + if is_cc and path not in source_dirs: + source_dirs.append(path) + + hasc = True + + waf_executable = os.path.abspath(sys.argv[0]) + project = self.impl_create_project(sys.executable, appname, hasc, hasjava, haspython, waf_executable) + self.write_conf_to_xml('.project', project) + + if hasc: + project = self.impl_create_cproject(sys.executable, waf_executable, appname, workspace_includes, cpppath, source_dirs) + self.write_conf_to_xml('.cproject', project) + + if haspython: + project = self.impl_create_pydevproject(sys.path, pythonpath) + self.write_conf_to_xml('.pydevproject', project) + + if hasjava: + project = self.impl_create_javaproject(javasrcpath, javalibpath) + self.write_conf_to_xml('.classpath', project) + + # Create editor language settings to have correct standards applied in IDE, as per project configuration + try: + os.mkdir(settings_dir) + except OSError: + pass # Ignore if dir already exists + + lang_settings = Document() + project = lang_settings.createElement('project') + + # Language configurations for C and C++ via cdt + if hasc: + configuration = self.add(lang_settings, project, 'configuration', + {'id' : 'org.eclipse.cdt.core.default.config.1', 'name': 'Default'}) + + extension = self.add(lang_settings, configuration, 'extension', {'point': 'org.eclipse.cdt.core.LanguageSettingsProvider'}) + + provider = self.add(lang_settings, extension, 'provider', + { 'copy-of': 'extension', + 'id': 'org.eclipse.cdt.ui.UserLanguageSettingsProvider'}) + + provider = self.add(lang_settings, extension, 'provider-reference', + { 'id': 'org.eclipse.cdt.core.ReferencedProjectsLanguageSettingsProvider', + 'ref': 'shared-provider'}) + + provider = self.add(lang_settings, extension, 'provider-reference', + { 'id': 'org.eclipse.cdt.managedbuilder.core.MBSLanguageSettingsProvider', + 'ref': 'shared-provider'}) + + # C and C++ are kept as separated providers so appropriate flags are used also in mixed projects + if self.env.CC: + provider = self.add(lang_settings, extension, 'provider', + { 'class': 'org.eclipse.cdt.managedbuilder.language.settings.providers.GCCBuiltinSpecsDetector', + 'console': 'false', + 'id': 'org.eclipse.cdt.managedbuilder.language.settings.providers.GCCBuiltinSpecsDetector.1', + 'keep-relative-paths' : 'false', + 'name': 'CDT GCC Built-in Compiler Settings', + 
'parameter': '%s %s ${FLAGS} -E -P -v -dD "${INPUTS}"'%(self.env.CC[0],' '.join(self.env['CFLAGS'])), + 'prefer-non-shared': 'true' }) + + self.add(lang_settings, provider, 'language-scope', { 'id': 'org.eclipse.cdt.core.gcc'}) + + if self.env.CXX: + provider = self.add(lang_settings, extension, 'provider', + { 'class': 'org.eclipse.cdt.managedbuilder.language.settings.providers.GCCBuiltinSpecsDetector', + 'console': 'false', + 'id': 'org.eclipse.cdt.managedbuilder.language.settings.providers.GCCBuiltinSpecsDetector.2', + 'keep-relative-paths' : 'false', + 'name': 'CDT GCC Built-in Compiler Settings', + 'parameter': '%s %s ${FLAGS} -E -P -v -dD "${INPUTS}"'%(self.env.CXX[0],' '.join(self.env['CXXFLAGS'])), + 'prefer-non-shared': 'true' }) + self.add(lang_settings, provider, 'language-scope', { 'id': 'org.eclipse.cdt.core.g++'}) + + lang_settings.appendChild(project) + self.write_conf_to_xml('%s%s%s'%(settings_dir, os.path.sep, settings_name), lang_settings) + + def impl_create_project(self, executable, appname, hasc, hasjava, haspython, waf_executable): + doc = Document() + projectDescription = doc.createElement('projectDescription') + self.add(doc, projectDescription, 'name', appname) + self.add(doc, projectDescription, 'comment') + self.add(doc, projectDescription, 'projects') + buildSpec = self.add(doc, projectDescription, 'buildSpec') + buildCommand = self.add(doc, buildSpec, 'buildCommand') + self.add(doc, buildCommand, 'triggers', 'clean,full,incremental,') + arguments = self.add(doc, buildCommand, 'arguments') + dictionaries = {} + + # If CDT is present, instruct this one to call waf as it is more flexible (separate build/clean ...) + if hasc: + self.add(doc, buildCommand, 'name', oe_cdt + '.managedbuilder.core.genmakebuilder') + # the default make-style targets are overwritten by the .cproject values + dictionaries = { + cdt_mk + '.contents': cdt_mk + '.activeConfigSettings', + cdt_mk + '.enableAutoBuild': 'false', + cdt_mk + '.enableCleanBuild': 'true', + cdt_mk + '.enableFullBuild': 'true', + } + else: + # Otherwise for Java/Python an external builder tool is created that will call waf build + self.add(doc, buildCommand, 'name', 'org.eclipse.ui.externaltools.ExternalToolBuilder') + dictionaries = { + 'LaunchConfigHandle': '<project>/%s/%s'%(extbuilder_dir, extbuilder_name), + } + # The definition is in a separate directory XML file + try: + os.mkdir(extbuilder_dir) + except OSError: + pass # Ignore error if already exists + + # Populate here the external builder XML calling waf + builder = Document() + launchConfiguration = doc.createElement('launchConfiguration') + launchConfiguration.setAttribute('type', 'org.eclipse.ui.externaltools.ProgramBuilderLaunchConfigurationType') + self.add(doc, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.debug.ui.ATTR_LAUNCH_IN_BACKGROUND', 'value': 'false'}) + self.add(doc, launchConfiguration, 'booleanAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TRIGGERS_CONFIGURED', 'value': 'true'}) + self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_LOCATION', 'value': waf_executable}) + self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_RUN_BUILD_KINDS', 'value': 'full,incremental,'}) + self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_TOOL_ARGUMENTS', 'value': 'build'}) + self.add(doc, launchConfiguration, 'stringAttribute', {'key': 'org.eclipse.ui.externaltools.ATTR_WORKING_DIRECTORY', 'value': 
'${project_loc}'}) + builder.appendChild(launchConfiguration) + # And write the XML to the file references before + self.write_conf_to_xml('%s%s%s'%(extbuilder_dir, os.path.sep, extbuilder_name), builder) + + + for k, v in dictionaries.items(): + self.addDictionary(doc, arguments, k, v) + + natures = self.add(doc, projectDescription, 'natures') + + if hasc: + nature_list = """ + core.ccnature + managedbuilder.core.ScannerConfigNature + managedbuilder.core.managedBuildNature + core.cnature + """.split() + for n in nature_list: + self.add(doc, natures, 'nature', oe_cdt + '.' + n) + + if haspython: + self.add(doc, natures, 'nature', 'org.python.pydev.pythonNature') + if hasjava: + self.add(doc, natures, 'nature', 'org.eclipse.jdt.core.javanature') + + doc.appendChild(projectDescription) + return doc + + def impl_create_cproject(self, executable, waf_executable, appname, workspace_includes, cpppath, source_dirs=[]): + doc = Document() + doc.appendChild(doc.createProcessingInstruction('fileVersion', '4.0.0')) + cconf_id = cdt_core + '.default.config.1' + cproject = doc.createElement('cproject') + storageModule = self.add(doc, cproject, 'storageModule', + {'moduleId': cdt_core + '.settings'}) + cconf = self.add(doc, storageModule, 'cconfiguration', {'id':cconf_id}) + + storageModule = self.add(doc, cconf, 'storageModule', + {'buildSystemId': oe_cdt + '.managedbuilder.core.configurationDataProvider', + 'id': cconf_id, + 'moduleId': cdt_core + '.settings', + 'name': 'Default'}) + + self.add(doc, storageModule, 'externalSettings') + + extensions = self.add(doc, storageModule, 'extensions') + extension_list = """ + VCErrorParser + MakeErrorParser + GCCErrorParser + GASErrorParser + GLDErrorParser + """.split() + self.add(doc, extensions, 'extension', {'id': cdt_core + '.ELF', 'point':cdt_core + '.BinaryParser'}) + for e in extension_list: + self.add(doc, extensions, 'extension', {'id': cdt_core + '.' + e, 'point':cdt_core + '.ErrorParser'}) + + storageModule = self.add(doc, cconf, 'storageModule', + {'moduleId': 'cdtBuildSystem', 'version': '4.0.0'}) + config = self.add(doc, storageModule, 'configuration', + {'artifactName': appname, + 'id': cconf_id, + 'name': 'Default', + 'parent': cdt_bld + '.prefbase.cfg'}) + folderInfo = self.add(doc, config, 'folderInfo', + {'id': cconf_id+'.', 'name': '/', 'resourcePath': ''}) + + toolChain = self.add(doc, folderInfo, 'toolChain', + {'id': cdt_bld + '.prefbase.toolchain.1', + 'name': 'No ToolChain', + 'resourceTypeBasedDiscovery': 'false', + 'superClass': cdt_bld + '.prefbase.toolchain'}) + + self.add(doc, toolChain, 'targetPlatform', {'binaryParser': 'org.eclipse.cdt.core.ELF', 'id': cdt_bld + '.prefbase.toolchain.1', 'name': ''}) + + waf_build = '"%s" %s'%(waf_executable, eclipse.fun) + waf_clean = '"%s" clean'%(waf_executable) + self.add(doc, toolChain, 'builder', + {'autoBuildTarget': waf_build, + 'command': executable, + 'enableAutoBuild': 'false', + 'cleanBuildTarget': waf_clean, + 'enableIncrementalBuild': 'true', + 'id': cdt_bld + '.settings.default.builder.1', + 'incrementalBuildTarget': waf_build, + 'managedBuildOn': 'false', + 'name': 'Gnu Make Builder', + 'superClass': cdt_bld + '.settings.default.builder'}) + + tool_index = 1; + for tool_name in ("Assembly", "GNU C++", "GNU C"): + tool = self.add(doc, toolChain, 'tool', + {'id': cdt_bld + '.settings.holder.' 
+ str(tool_index), + 'name': tool_name, + 'superClass': cdt_bld + '.settings.holder'}) + if cpppath or workspace_includes: + incpaths = cdt_bld + '.settings.holder.incpaths' + option = self.add(doc, tool, 'option', + {'id': incpaths + '.' + str(tool_index), + 'name': 'Include Paths', + 'superClass': incpaths, + 'valueType': 'includePath'}) + for i in workspace_includes: + self.add(doc, option, 'listOptionValue', + {'builtIn': 'false', + 'value': '"${workspace_loc:/%s/%s}"'%(appname, i)}) + for i in cpppath: + self.add(doc, option, 'listOptionValue', + {'builtIn': 'false', + 'value': '"%s"'%(i)}) + if tool_name == "GNU C++" or tool_name == "GNU C": + self.add(doc,tool,'inputType',{ 'id':'org.eclipse.cdt.build.core.settings.holder.inType.' + str(tool_index), \ + 'languageId':'org.eclipse.cdt.core.gcc' if tool_name == "GNU C" else 'org.eclipse.cdt.core.g++','languageName':tool_name, \ + 'sourceContentType':'org.eclipse.cdt.core.cSource,org.eclipse.cdt.core.cHeader', \ + 'superClass':'org.eclipse.cdt.build.core.settings.holder.inType' }) + tool_index += 1 + + if source_dirs: + sourceEntries = self.add(doc, config, 'sourceEntries') + for i in source_dirs: + self.add(doc, sourceEntries, 'entry', + {'excluding': i, + 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', + 'kind': 'sourcePath', + 'name': ''}) + self.add(doc, sourceEntries, 'entry', + { + 'flags': 'VALUE_WORKSPACE_PATH|RESOLVED', + 'kind': 'sourcePath', + 'name': i}) + + storageModule = self.add(doc, cconf, 'storageModule', + {'moduleId': cdt_mk + '.buildtargets'}) + buildTargets = self.add(doc, storageModule, 'buildTargets') + def addTargetWrap(name, runAll): + return self.addTarget(doc, buildTargets, executable, name, + '"%s" %s'%(waf_executable, name), runAll) + addTargetWrap('configure', True) + addTargetWrap('dist', False) + addTargetWrap('install', False) + addTargetWrap('check', False) + for addTgt in self.env.ECLIPSE_EXTRA_TARGETS or []: + addTargetWrap(addTgt, False) + + storageModule = self.add(doc, cproject, 'storageModule', + {'moduleId': 'cdtBuildSystem', + 'version': '4.0.0'}) + + self.add(doc, storageModule, 'project', {'id': '%s.null.1'%appname, 'name': appname}) + + storageModule = self.add(doc, cproject, 'storageModule', + {'moduleId': 'org.eclipse.cdt.core.LanguageSettingsProviders'}) + + storageModule = self.add(doc, cproject, 'storageModule', + {'moduleId': 'scannerConfiguration'}) + + doc.appendChild(cproject) + return doc + + def impl_create_pydevproject(self, system_path, user_path): + # create a pydevproject file + doc = Document() + doc.appendChild(doc.createProcessingInstruction('eclipse-pydev', 'version="1.0"')) + pydevproject = doc.createElement('pydev_project') + prop = self.add(doc, pydevproject, + 'pydev_property', + 'python %d.%d'%(sys.version_info[0], sys.version_info[1])) + prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_VERSION') + prop = self.add(doc, pydevproject, 'pydev_property', 'Default') + prop.setAttribute('name', 'org.python.pydev.PYTHON_PROJECT_INTERPRETER') + # add waf's paths + wafadmin = [p for p in system_path if p.find('wafadmin') != -1] + if wafadmin: + prop = self.add(doc, pydevproject, 'pydev_pathproperty', + {'name':'org.python.pydev.PROJECT_EXTERNAL_SOURCE_PATH'}) + for i in wafadmin: + self.add(doc, prop, 'path', i) + if user_path: + prop = self.add(doc, pydevproject, 'pydev_pathproperty', + {'name':'org.python.pydev.PROJECT_SOURCE_PATH'}) + for i in user_path: + self.add(doc, prop, 'path', '/${PROJECT_DIR_NAME}/'+i) + + doc.appendChild(pydevproject) + return doc + + def 
impl_create_javaproject(self, javasrcpath, javalibpath): + # create a .classpath file for java usage + doc = Document() + javaproject = doc.createElement('classpath') + if javasrcpath: + for i in javasrcpath: + self.add(doc, javaproject, 'classpathentry', + {'kind': 'src', 'path': i}) + + if javalibpath: + for i in javalibpath: + self.add(doc, javaproject, 'classpathentry', + {'kind': 'lib', 'path': i}) + + self.add(doc, javaproject, 'classpathentry', {'kind': 'con', 'path': 'org.eclipse.jdt.launching.JRE_CONTAINER'}) + self.add(doc, javaproject, 'classpathentry', {'kind': 'output', 'path': self.bldnode.name }) + doc.appendChild(javaproject) + return doc + + def addDictionary(self, doc, parent, k, v): + dictionary = self.add(doc, parent, 'dictionary') + self.add(doc, dictionary, 'key', k) + self.add(doc, dictionary, 'value', v) + return dictionary + + def addTarget(self, doc, buildTargets, executable, name, buildTarget, runAllBuilders=True): + target = self.add(doc, buildTargets, 'target', + {'name': name, + 'path': '', + 'targetID': oe_cdt + '.build.MakeTargetBuilder'}) + self.add(doc, target, 'buildCommand', executable) + self.add(doc, target, 'buildArguments', None) + self.add(doc, target, 'buildTarget', buildTarget) + self.add(doc, target, 'stopOnError', 'true') + self.add(doc, target, 'useDefaultCommand', 'false') + self.add(doc, target, 'runAllBuilders', str(runAllBuilders).lower()) + + def add(self, doc, parent, tag, value = None): + el = doc.createElement(tag) + if (value): + if type(value) == type(str()): + el.appendChild(doc.createTextNode(value)) + elif type(value) == type(dict()): + self.setAttributes(el, value) + parent.appendChild(el) + return el + + def setAttributes(self, node, attrs): + for k, v in attrs.items(): + node.setAttribute(k, v) + diff --git a/third_party/waf/waflib/extras/erlang.py b/third_party/waf/waflib/extras/erlang.py new file mode 100644 index 0000000..0b93d9a --- /dev/null +++ b/third_party/waf/waflib/extras/erlang.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010 (ita) +# Przemyslaw Rzepecki, 2016 + +""" +Erlang support +""" + +import re +from waflib import Task, TaskGen +from waflib.TaskGen import feature, after_method, before_method +# to load the method "to_incnodes" below +from waflib.Tools import ccroot + +# Those flags are required by the Erlang VM to execute/evaluate code in +# non-interactive mode. It is used in this tool to create Erlang modules +# documentation and run unit tests. The user can pass additional arguments to the +# 'erl' command with ERL_FLAGS environment variable. 
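+# +# For orientation, a wscript driving this tool might look like the following +# hedged sketch (file and directory names are hypothetical): +# +# def configure(conf): +# conf.load('erlang') +# +# def build(bld): +# bld(features='eunit', source='src/mymod.erl', includes='include') +# bld(features='edoc', source='src/mymod.erl')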
+EXEC_NON_INTERACTIVE = ['-noshell', '-noinput', '-eval'] + +def configure(conf): + conf.find_program('erlc', var='ERLC') + conf.find_program('erl', var='ERL') + conf.add_os_flags('ERLC_FLAGS') + conf.add_os_flags('ERL_FLAGS') + conf.env.ERLC_DEF_PATTERN = '-D%s' + conf.env.ERLC_INC_PATTERN = '-I%s' + +@TaskGen.extension('.erl') +def process_erl_node(self, node): + tsk = self.create_task('erl', node, node.change_ext('.beam')) + tsk.erlc_incnodes = [tsk.outputs[0].parent] + self.to_incnodes(self.includes) + tsk.env.append_value('ERLC_INCPATHS', [x.abspath() for x in tsk.erlc_incnodes]) + tsk.env.append_value('ERLC_DEFINES', self.to_list(getattr(self, 'defines', []))) + tsk.env.append_value('ERLC_FLAGS', self.to_list(getattr(self, 'flags', []))) + tsk.cwd = tsk.outputs[0].parent + +class erl(Task.Task): + color = 'GREEN' + run_str = '${ERLC} ${ERL_FLAGS} ${ERLC_INC_PATTERN:ERLC_INCPATHS} ${ERLC_DEF_PATTERN:ERLC_DEFINES} ${SRC}' + + def scan(task): + node = task.inputs[0] + + deps = [] + scanned = set([]) + nodes_to_scan = [node] + + for n in nodes_to_scan: + if n.abspath() in scanned: + continue + + for i in re.findall(r'-include\("(.*)"\)\.', n.read()): + for d in task.erlc_incnodes: + r = d.find_node(i) + if r: + deps.append(r) + nodes_to_scan.append(r) + break + scanned.add(n.abspath()) + + return (deps, []) + +@TaskGen.extension('.beam') +def process(self, node): + pass + + +class erl_test(Task.Task): + color = 'BLUE' + run_str = '${ERL} ${ERL_FLAGS} ${ERL_TEST_FLAGS}' + +@feature('eunit') +@after_method('process_source') +def add_erl_test_run(self): + test_modules = [t.outputs[0] for t in self.tasks] + test_task = self.create_task('erl_test') + test_task.set_inputs(self.source + test_modules) + test_task.cwd = test_modules[0].parent + + test_task.env.append_value('ERL_FLAGS', self.to_list(getattr(self, 'flags', []))) + + test_list = ", ".join([m.change_ext("").path_from(test_task.cwd)+":test()" for m in test_modules]) + test_flag = 'halt(case lists:all(fun(Elem) -> Elem == ok end, [%s]) of true -> 0; false -> 1 end).' % test_list + test_task.env.append_value('ERL_TEST_FLAGS', EXEC_NON_INTERACTIVE) + test_task.env.append_value('ERL_TEST_FLAGS', test_flag) + + +class edoc(Task.Task): + color = 'BLUE' + run_str = "${ERL} ${ERL_FLAGS} ${ERL_DOC_FLAGS}" + def keyword(self): + return 'Generating edoc' + +@feature('edoc') +@before_method('process_source') +def add_edoc_task(self): + # do not process source, it would create double erl->beam task + self.meths.remove('process_source') + e = self.path.find_resource(self.source) + t = e.change_ext('.html') + png = t.parent.make_node('erlang.png') + css = t.parent.make_node('stylesheet.css') + tsk = self.create_task('edoc', e, [t, png, css]) + tsk.cwd = tsk.outputs[0].parent + tsk.env.append_value('ERL_DOC_FLAGS', EXEC_NON_INTERACTIVE) + tsk.env.append_value('ERL_DOC_FLAGS', 'edoc:files(["%s"]), halt(0).' % tsk.inputs[0].abspath()) + # TODO the above can break if a file path contains '"' + diff --git a/third_party/waf/waflib/extras/fast_partial.py b/third_party/waf/waflib/extras/fast_partial.py new file mode 100644 index 0000000..90a9472 --- /dev/null +++ b/third_party/waf/waflib/extras/fast_partial.py @@ -0,0 +1,531 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2017-2018 (ita) + +""" +A system for fast partial rebuilds + +Creating a large amount of task objects up front can take some time. +By making a few assumptions, it is possible to avoid posting creating +task objects for targets that are already up-to-date. 
+ +On a silly benchmark the gain observed for 1M tasks can be 5m->10s +for a single file change. + +Usage:: + + def options(opt): + opt.load('fast_partial') + +Assumptions: +* Start with a clean build (run "waf distclean" after enabling) +* Mostly for C/C++/Fortran targets with link tasks (object-only targets are not handled) + try it in the folder generated by utils/genbench.py +* For full project builds: no --targets and no pruning from subfolders +* The installation phase is ignored +* `use=` dependencies are specified up front even across build groups +* Task generator source files are not obtained from globs + +Implementation details: +* The first layer obtains file timestamps to recalculate file hashes only + when necessary (similar to md5_tstamp); the timestamps are then stored + in a dedicated pickle file +* A second layer associates each task generator to a file set to help + detecting changes. Task generators are to create their tasks only when + the related files have been modified. A specific db file is created + to store such data (5m -> 1m10) +* A third layer binds build context proxies onto task generators, replacing + the default context. While loading data for the full build uses more memory + (4GB -> 9GB), partial builds are then much faster (1m10 -> 13s) +* A fourth layer enables a 2-level cache on file signatures to + reduce the size of the main pickle file (13s -> 10s) +""" + +import os +from waflib import Build, Context, Errors, Logs, Task, TaskGen, Utils +from waflib.TaskGen import feature, after_method, taskgen_method +import waflib.Node + +DONE = 0 +DIRTY = 1 +NEEDED = 2 + +SKIPPABLE = ['cshlib', 'cxxshlib', 'cstlib', 'cxxstlib', 'cprogram', 'cxxprogram'] + +TSTAMP_DB = '.wafpickle_tstamp_db_file' + +SAVED_ATTRS = 'root node_sigs task_sigs imp_sigs raw_deps node_deps'.split() + +class bld_proxy(object): + def __init__(self, bld): + object.__setattr__(self, 'bld', bld) + + object.__setattr__(self, 'node_class', type('Nod3', (waflib.Node.Node,), {})) + self.node_class.__module__ = 'waflib.Node' + self.node_class.ctx = self + + object.__setattr__(self, 'root', self.node_class('', None)) + for x in SAVED_ATTRS: + if x != 'root': + object.__setattr__(self, x, {}) + + self.fix_nodes() + + def __setattr__(self, name, value): + bld = object.__getattribute__(self, 'bld') + setattr(bld, name, value) + + def __delattr__(self, name): + bld = object.__getattribute__(self, 'bld') + delattr(bld, name) + + def __getattribute__(self, name): + try: + return object.__getattribute__(self, name) + except AttributeError: + bld = object.__getattribute__(self, 'bld') + return getattr(bld, name) + + def __call__(self, *k, **kw): + return self.bld(*k, **kw) + + def fix_nodes(self): + for x in ('srcnode', 'path', 'bldnode'): + node = self.root.find_dir(getattr(self.bld, x).abspath()) + object.__setattr__(self, x, node) + + def set_key(self, store_key): + object.__setattr__(self, 'store_key', store_key) + + def fix_tg_path(self, *tgs): + # changing Node objects on task generators is possible + # yet, all Node objects must belong to the same parent + for tg in tgs: + tg.path = self.root.make_node(tg.path.abspath()) + + def restore(self): + dbfn = os.path.join(self.variant_dir, Context.DBFILE + self.store_key) + Logs.debug('rev_use: reading %s', dbfn) + try: + data = Utils.readf(dbfn, 'rb') + except (EnvironmentError, EOFError): + # handle missing file/empty file + Logs.debug('rev_use: Could not load the build cache %s (missing)', dbfn) + else: + try: + waflib.Node.pickle_lock.acquire() + 
waflib.Node.Nod3 = self.node_class + try: + data = Build.cPickle.loads(data) + except Exception as e: + Logs.debug('rev_use: Could not pickle the build cache %s: %r', dbfn, e) + else: + for x in SAVED_ATTRS: + object.__setattr__(self, x, data.get(x, {})) + finally: + waflib.Node.pickle_lock.release() + self.fix_nodes() + + def store(self): + data = {} + for x in Build.SAVED_ATTRS: + data[x] = getattr(self, x) + db = os.path.join(self.variant_dir, Context.DBFILE + self.store_key) + + with waflib.Node.pickle_lock: + waflib.Node.Nod3 = self.node_class + try: + x = Build.cPickle.dumps(data, Build.PROTOCOL) + except Build.cPickle.PicklingError: + root = data['root'] + for node_deps in data['node_deps'].values(): + for idx, node in enumerate(node_deps): + # there may be more cross-context Node objects to fix, + # but this should be the main source + node_deps[idx] = root.find_node(node.abspath()) + x = Build.cPickle.dumps(data, Build.PROTOCOL) + + Logs.debug('rev_use: storing %s', db) + Utils.writef(db + '.tmp', x, m='wb') + try: + st = os.stat(db) + os.remove(db) + if not Utils.is_win32: + os.chown(db + '.tmp', st.st_uid, st.st_gid) + except (AttributeError, OSError): + pass + os.rename(db + '.tmp', db) + +class bld(Build.BuildContext): + def __init__(self, **kw): + super(bld, self).__init__(**kw) + self.hashes_md5_tstamp = {} + + def __call__(self, *k, **kw): + # this is one way of doing it, one could use a task generator method too + bld = kw['bld'] = bld_proxy(self) + ret = TaskGen.task_gen(*k, **kw) + self.task_gen_cache_names = {} + self.add_to_group(ret, group=kw.get('group')) + ret.bld = bld + bld.set_key(ret.path.abspath().replace(os.sep, '') + str(ret.idx)) + return ret + + def is_dirty(self): + return True + + def store_tstamps(self): + # Called after a build is finished + # For each task generator, record all files involved in task objects + # optimization: done only if there was something built + do_store = False + try: + f_deps = self.f_deps + except AttributeError: + f_deps = self.f_deps = {} + self.f_tstamps = {} + + allfiles = set() + for g in self.groups: + for tg in g: + try: + staleness = tg.staleness + except AttributeError: + staleness = DIRTY + + if staleness != DIRTY: + # DONE case: there was nothing built + # NEEDED case: the tg was brought in because of 'use' propagation + # but nothing really changed for them, there may be incomplete + # tasks (object files) and in this case it is best to let the next build + # figure out if an input/output file changed + continue + + do_cache = False + for tsk in tg.tasks: + if tsk.hasrun == Task.SUCCESS: + do_cache = True + pass + elif tsk.hasrun == Task.SKIPPED: + pass + else: + # one failed task, clear the cache for this tg + try: + del f_deps[(tg.path.abspath(), tg.idx)] + except KeyError: + pass + else: + # just store the new state because there is a change + do_store = True + + # skip the rest because there is no valid cache possible + break + else: + if not do_cache: + # all skipped, but is there anything in cache? + try: + f_deps[(tg.path.abspath(), tg.idx)] + except KeyError: + # probably cleared because a wscript file changed + # store it + do_cache = True + + if do_cache: + + # there was a rebuild, store the data structure too + tg.bld.store() + + # all tasks skipped but no cache + # or a successful task build + do_store = True + st = set() + for tsk in tg.tasks: + st.update(tsk.inputs) + st.update(self.node_deps.get(tsk.uid(), [])) + + # TODO do last/when loading the tgs? 
+ lst = [] + for k in ('wscript', 'wscript_build'): + n = tg.path.find_node(k) + if n: + n.get_bld_sig() + lst.append(n.abspath()) + + lst.extend(sorted(x.abspath() for x in st)) + allfiles.update(lst) + f_deps[(tg.path.abspath(), tg.idx)] = lst + + for x in allfiles: + # f_tstamps has everything, while md5_tstamp can be relatively empty on partial builds + self.f_tstamps[x] = self.hashes_md5_tstamp[x][0] + + if do_store: + dbfn = os.path.join(self.variant_dir, TSTAMP_DB) + Logs.debug('rev_use: storing %s', dbfn) + dbfn_tmp = dbfn + '.tmp' + x = Build.cPickle.dumps([self.f_tstamps, f_deps], Build.PROTOCOL) + Utils.writef(dbfn_tmp, x, m='wb') + os.rename(dbfn_tmp, dbfn) + Logs.debug('rev_use: stored %s', dbfn) + + def store(self): + self.store_tstamps() + if self.producer.dirty: + Build.BuildContext.store(self) + + def compute_needed_tgs(self): + # assume the 'use' keys are not modified during the build phase + + dbfn = os.path.join(self.variant_dir, TSTAMP_DB) + Logs.debug('rev_use: Loading %s', dbfn) + try: + data = Utils.readf(dbfn, 'rb') + except (EnvironmentError, EOFError): + Logs.debug('rev_use: Could not load the build cache %s (missing)', dbfn) + self.f_deps = {} + self.f_tstamps = {} + else: + try: + self.f_tstamps, self.f_deps = Build.cPickle.loads(data) + except Exception as e: + Logs.debug('rev_use: Could not pickle the build cache %s: %r', dbfn, e) + self.f_deps = {} + self.f_tstamps = {} + else: + Logs.debug('rev_use: Loaded %s', dbfn) + + + # 1. obtain task generators that contain rebuilds + # 2. obtain the 'use' graph and its dual + stales = set() + reverse_use_map = Utils.defaultdict(list) + use_map = Utils.defaultdict(list) + + for g in self.groups: + for tg in g: + if tg.is_stale(): + stales.add(tg) + + try: + lst = tg.use = Utils.to_list(tg.use) + except AttributeError: + pass + else: + for x in lst: + try: + xtg = self.get_tgen_by_name(x) + except Errors.WafError: + pass + else: + use_map[tg].append(xtg) + reverse_use_map[xtg].append(tg) + + Logs.debug('rev_use: found %r stale tgs', len(stales)) + + # 3. dfs to post downstream tg as stale + visited = set() + def mark_down(tg): + if tg in visited: + return + visited.add(tg) + Logs.debug('rev_use: marking down %r as stale', tg.name) + tg.staleness = DIRTY + for x in reverse_use_map[tg]: + mark_down(x) + for tg in stales: + mark_down(tg) + + # 4. 
dfs to find ancestor tgs to mark as needed + self.needed_tgs = needed_tgs = set() + def mark_needed(tg): + if tg in needed_tgs: + return + needed_tgs.add(tg) + if tg.staleness == DONE: + Logs.debug('rev_use: marking up %r as needed', tg.name) + tg.staleness = NEEDED + for x in use_map[tg]: + mark_needed(x) + for xx in visited: + mark_needed(xx) + + # so we have the whole tg trees to post in the set "needed" + # load their build trees + for tg in needed_tgs: + tg.bld.restore() + tg.bld.fix_tg_path(tg) + + # the stale ones should be fully built, while the needed ones + # may skip a few tasks, see create_compiled_task and apply_link_after below + Logs.debug('rev_use: amount of needed task gens: %r', len(needed_tgs)) + + def post_group(self): + # assumption: we can ignore the folder/subfolders cuts + def tgpost(tg): + try: + f = tg.post + except AttributeError: + pass + else: + f() + + if not self.targets or self.targets == '*': + for tg in self.groups[self.current_group]: + # this can cut quite a lot of tg objects + if tg in self.needed_tgs: + tgpost(tg) + else: + # default implementation + return Build.BuildContext.post_group(self) + + def get_build_iterator(self): + if not self.targets or self.targets == '*': + self.compute_needed_tgs() + return Build.BuildContext.get_build_iterator(self) + +@taskgen_method +def is_stale(self): + # assume no globs + self.staleness = DIRTY + + # 1. the case of always stale targets + if getattr(self, 'always_stale', False): + return True + + # 2. check if the db file exists + db = os.path.join(self.bld.variant_dir, Context.DBFILE) + try: + dbstat = os.stat(db).st_mtime + except OSError: + Logs.debug('rev_use: must post %r because this is a clean build', self.name) + return True + + # 3.a check if the configuration exists + cache_node = self.bld.bldnode.find_node('c4che/build.config.py') + if not cache_node: + return True + + # 3.b check if the configuration changed + if os.stat(cache_node.abspath()).st_mtime > dbstat: + Logs.debug('rev_use: must post %r because the configuration has changed', self.name) + return True + + # 3.c any tstamp data? + try: + f_deps = self.bld.f_deps + except AttributeError: + Logs.debug('rev_use: must post %r because there is no f_deps', self.name) + return True + + # 4. check if this is the first build (no cache) + try: + lst = f_deps[(self.path.abspath(), self.idx)] + except KeyError: + Logs.debug('rev_use: must post %r because it has no cached data', self.name) + return True + + try: + cache = self.bld.cache_tstamp_rev_use + except AttributeError: + cache = self.bld.cache_tstamp_rev_use = {} + + # 5.
check the timestamp of each dependency files listed is unchanged + f_tstamps = self.bld.f_tstamps + for x in lst: + try: + old_ts = f_tstamps[x] + except KeyError: + Logs.debug('rev_use: must post %r because %r is not in cache', self.name, x) + return True + + try: + try: + ts = cache[x] + except KeyError: + ts = cache[x] = os.stat(x).st_mtime + except OSError: + del f_deps[(self.path.abspath(), self.idx)] + Logs.debug('rev_use: must post %r because %r does not exist anymore', self.name, x) + return True + else: + if ts != old_ts: + Logs.debug('rev_use: must post %r because the timestamp on %r changed %r %r', self.name, x, old_ts, ts) + return True + + self.staleness = DONE + return False + +@taskgen_method +def create_compiled_task(self, name, node): + # skip the creation of object files + # assumption: object-only targets are not skippable + if self.staleness == NEEDED: + # only libraries/programs can skip object files + for x in SKIPPABLE: + if x in self.features: + return None + + out = '%s.%d.o' % (node.name, self.idx) + task = self.create_task(name, node, node.parent.find_or_declare(out)) + try: + self.compiled_tasks.append(task) + except AttributeError: + self.compiled_tasks = [task] + return task + +@feature(*SKIPPABLE) +@after_method('apply_link') +def apply_link_after(self): + # cprogram/cxxprogram might be unnecessary + if self.staleness != NEEDED: + return + for tsk in self.tasks: + tsk.hasrun = Task.SKIPPED + +def path_from(self, node): + # handle nodes of distinct types + if node.ctx is not self.ctx: + node = self.ctx.root.make_node(node.abspath()) + return self.default_path_from(node) +waflib.Node.Node.default_path_from = waflib.Node.Node.path_from +waflib.Node.Node.path_from = path_from + +def h_file(self): + # similar to md5_tstamp.py, but with 2-layer cache + # global_cache for the build context common for all task generators + # local_cache for the build context proxy (one by task generator) + # + # the global cache is not persistent + # the local cache is persistent and meant for partial builds + # + # assume all calls are made from a single thread + # + filename = self.abspath() + st = os.stat(filename) + + global_cache = self.ctx.bld.hashes_md5_tstamp + local_cache = self.ctx.hashes_md5_tstamp + + if filename in global_cache: + # value already calculated in this build + cval = global_cache[filename] + + # the value in global cache is assumed to be calculated once + # reverifying it could cause task generators + # to get distinct tstamp values, thus missing rebuilds + local_cache[filename] = cval + return cval[1] + + if filename in local_cache: + cval = local_cache[filename] + if cval[0] == st.st_mtime: + # correct value from a previous build + # put it in the global cache + global_cache[filename] = cval + return cval[1] + + ret = Utils.h_file(filename) + local_cache[filename] = global_cache[filename] = (st.st_mtime, ret) + return ret +waflib.Node.Node.h_file = h_file + diff --git a/third_party/waf/waflib/extras/fc_bgxlf.py b/third_party/waf/waflib/extras/fc_bgxlf.py new file mode 100644 index 0000000..cca1810 --- /dev/null +++ b/third_party/waf/waflib/extras/fc_bgxlf.py @@ -0,0 +1,32 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +from waflib.Tools import fc, fc_config, fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].insert(0, 'fc_bgxlf') + +@conf +def find_bgxlf(conf): + fc = conf.find_program(['bgxlf2003_r','bgxlf2003'], var='FC') + conf.get_xlf_version(fc) + conf.env.FC_NAME = 'BGXLF' + +@conf +def bg_flags(self): + self.env.SONAME_ST = '' + self.env.FCSHLIB_MARKER = '' + self.env.FCSTLIB_MARKER = '' + self.env.FCFLAGS_fcshlib = ['-fPIC'] + self.env.LINKFLAGS_fcshlib = ['-G', '-Wl,-bexpfull'] + +def configure(conf): + conf.find_bgxlf() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.xlf_flags() + conf.bg_flags() + diff --git a/third_party/waf/waflib/extras/fc_cray.py b/third_party/waf/waflib/extras/fc_cray.py new file mode 100644 index 0000000..da733fa --- /dev/null +++ b/third_party/waf/waflib/extras/fc_cray.py @@ -0,0 +1,51 @@ +#! /usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +import re +from waflib.Tools import fc, fc_config, fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].append('fc_cray') + +@conf +def find_crayftn(conf): + """Find the Cray fortran compiler (will look in the environment variable 'FC')""" + fc = conf.find_program(['crayftn'], var='FC') + conf.get_crayftn_version(fc) + conf.env.FC_NAME = 'CRAY' + conf.env.FC_MOD_CAPITALIZATION = 'UPPER.mod' + +@conf +def crayftn_flags(conf): + v = conf.env + v['_FCMODOUTFLAGS'] = ['-em', '-J.'] # enable module files and put them in the current directory + v['FCFLAGS_DEBUG'] = ['-m1'] # more verbose compiler warnings + v['FCFLAGS_fcshlib'] = ['-h pic'] + v['LINKFLAGS_fcshlib'] = ['-h shared'] + + v['FCSTLIB_MARKER'] = '-h static' + v['FCSHLIB_MARKER'] = '-h dynamic' + +@conf +def get_crayftn_version(conf, fc): + version_re = re.compile(r"Cray Fortran\s*:\s*Version\s*(?P<major>\d*)\.(?P<minor>\d*)", re.I).search + cmd = fc + ['-V'] + out,err = fc_config.getoutput(conf, cmd, stdin=False) + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + conf.fatal('Could not determine the Cray Fortran compiler version.') + k = match.groupdict() + conf.env['FC_VERSION'] = (k['major'], k['minor']) + +def configure(conf): + conf.find_crayftn() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.crayftn_flags() + diff --git a/third_party/waf/waflib/extras/fc_fujitsu.py b/third_party/waf/waflib/extras/fc_fujitsu.py new file mode 100644 index 0000000..cae676c --- /dev/null +++ b/third_party/waf/waflib/extras/fc_fujitsu.py @@ -0,0 +1,52 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# Detection of the Fujitsu Fortran compiler for ARM64FX + +import re +from waflib.Tools import fc,fc_config,fc_scan +from waflib.Configure import conf +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].append('fc_fujitsu') + +@conf +def find_fujitsu(conf): + fc=conf.find_program(['frtpx'],var='FC') + conf.get_fujitsu_version(fc) + conf.env.FC_NAME='FUJITSU' + conf.env.FC_MOD_CAPITALIZATION='lower' + +@conf +def fujitsu_flags(conf): + v=conf.env + v['_FCMODOUTFLAGS']=[] + v['FCFLAGS_DEBUG']=[] + v['FCFLAGS_fcshlib']=[] + v['LINKFLAGS_fcshlib']=[] + v['FCSTLIB_MARKER']='' + v['FCSHLIB_MARKER']='' + +@conf +def get_fujitsu_version(conf,fc): + version_re=re.compile(r"frtpx\s*\(FRT\)\s*(?P<major>\d+)\.(?P<minor>\d+)\.",re.I).search + cmd=fc+['--version'] + out,err=fc_config.getoutput(conf,cmd,stdin=False) + if out: + match=version_re(out) + else: + match=version_re(err) + if not match: + conf.fatal('Could not determine the Fujitsu FRT Fortran compiler version.') + else: + k=match.groupdict() + conf.env['FC_VERSION']=(k['major'],k['minor']) + +def configure(conf): + conf.find_fujitsu() + conf.find_program('ar',var='AR') + conf.add_os_flags('ARFLAGS') + if not conf.env.ARFLAGS: + conf.env.ARFLAGS=['rcs'] + conf.fc_flags() + conf.fc_add_flags() + conf.fujitsu_flags() diff --git a/third_party/waf/waflib/extras/fc_nag.py b/third_party/waf/waflib/extras/fc_nag.py new file mode 100644 index 0000000..edcb218 --- /dev/null +++ b/third_party/waf/waflib/extras/fc_nag.py @@ -0,0 +1,61 @@ +#! /usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +import re +from waflib import Utils +from waflib.Tools import fc,fc_config,fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].insert(0, 'fc_nag') + +@conf +def find_nag(conf): + """Find the NAG Fortran Compiler (will look in the environment variable 'FC')""" + + fc = conf.find_program(['nagfor'], var='FC') + conf.get_nag_version(fc) + conf.env.FC_NAME = 'NAG' + conf.env.FC_MOD_CAPITALIZATION = 'lower' + +@conf +def nag_flags(conf): + v = conf.env + v.FCFLAGS_DEBUG = ['-C=all'] + v.FCLNK_TGT_F = ['-o', ''] + v.FC_TGT_F = ['-c', '-o', ''] + +@conf +def nag_modifier_platform(conf): + dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() + nag_modifier_func = getattr(conf, 'nag_modifier_' + dest_os, None) + if nag_modifier_func: + nag_modifier_func() + +@conf +def get_nag_version(conf, fc): + """Get the NAG compiler version""" + + version_re = re.compile(r"^NAG Fortran Compiler *Release *(?P<major>\d*)\.(?P<minor>\d*)", re.M).search + cmd = fc + ['-V'] + + out, err = fc_config.getoutput(conf,cmd,stdin=False) + if out: + match = version_re(out) + if not match: + match = version_re(err) + else: match = version_re(err) + if not match: + conf.fatal('Could not determine the NAG version.') + k = match.groupdict() + conf.env['FC_VERSION'] = (k['major'], k['minor']) + +def configure(conf): + conf.find_nag() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.nag_flags() + conf.nag_modifier_platform() + 
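Each of these fc_* detectors registers itself with waflib.Tools.compiler_fc at import time (the fc_compiler['linux'] insert/append lines above), so a project can either pin one detector or let compiler_fc probe the registered list. A minimal wscript sketch, assuming these extras are reachable on waf's tool path:

def options(opt):
	opt.load('compiler_fc')

def configure(conf):
	conf.load('fc_fujitsu')       # pin the Fujitsu detector explicitly...
	# conf.load('compiler_fc')    # ...or probe the platform's registered compilers in order

diff --git a/third_party/waf/waflib/extras/fc_nec.py b/third_party/waf/waflib/extras/fc_nec.py new file mode 100644 index 0000000..67c8680 --- /dev/null +++ b/third_party/waf/waflib/extras/fc_nec.py @@ -0,0 +1,60 @@ +#!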
/usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +import re +from waflib.Tools import fc, fc_config, fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].append('fc_nec') + +@conf +def find_sxfc(conf): + """Find the NEC fortran compiler (will look in the environment variable 'FC')""" + fc = conf.find_program(['sxf90','sxf03'], var='FC') + conf.get_sxfc_version(fc) + conf.env.FC_NAME = 'NEC' + conf.env.FC_MOD_CAPITALIZATION = 'lower' + +@conf +def sxfc_flags(conf): + v = conf.env + v['_FCMODOUTFLAGS'] = [] + v['FCFLAGS_DEBUG'] = [] + v['FCFLAGS_fcshlib'] = [] + v['LINKFLAGS_fcshlib'] = [] + + v['FCSTLIB_MARKER'] = '' + v['FCSHLIB_MARKER'] = '' + +@conf +def get_sxfc_version(conf, fc): + version_re = re.compile(r"FORTRAN90/SX\s*Version\s*(?P<major>\d*)\.(?P<minor>\d*)", re.I).search + cmd = fc + ['-V'] + out,err = fc_config.getoutput(conf, cmd, stdin=False) + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + version_re=re.compile(r"NEC Fortran 2003 Compiler for\s*(?P<major>\S*)\s*\(c\)\s*(?P<minor>\d*)",re.I).search + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + conf.fatal('Could not determine the NEC Fortran compiler version.') + k = match.groupdict() + conf.env['FC_VERSION'] = (k['major'], k['minor']) + +def configure(conf): + conf.find_sxfc() + conf.find_program('sxar',var='AR') + conf.add_os_flags('ARFLAGS') + if not conf.env.ARFLAGS: + conf.env.ARFLAGS=['rcs'] + + conf.fc_flags() + conf.fc_add_flags() + conf.sxfc_flags() diff --git a/third_party/waf/waflib/extras/fc_nfort.py b/third_party/waf/waflib/extras/fc_nfort.py new file mode 100644 index 0000000..c25886b --- /dev/null +++ b/third_party/waf/waflib/extras/fc_nfort.py @@ -0,0 +1,52 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Detection of the NEC Fortran compiler for Aurora Tsubasa + +import re +from waflib.Tools import fc,fc_config,fc_scan +from waflib.Configure import conf +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].append('fc_nfort') + +@conf +def find_nfort(conf): + fc=conf.find_program(['nfort'],var='FC') + conf.get_nfort_version(fc) + conf.env.FC_NAME='NFORT' + conf.env.FC_MOD_CAPITALIZATION='lower' + +@conf +def nfort_flags(conf): + v=conf.env + v['_FCMODOUTFLAGS']=[] + v['FCFLAGS_DEBUG']=[] + v['FCFLAGS_fcshlib']=[] + v['LINKFLAGS_fcshlib']=[] + v['FCSTLIB_MARKER']='' + v['FCSHLIB_MARKER']='' + +@conf +def get_nfort_version(conf,fc): + version_re=re.compile(r"nfort\s*\(NFORT\)\s*(?P<major>\d+)\.(?P<minor>\d+)\.",re.I).search + cmd=fc+['--version'] + out,err=fc_config.getoutput(conf,cmd,stdin=False) + if out: + match=version_re(out) + else: + match=version_re(err) + if not match: + conf.fatal('Could not determine the NEC NFORT Fortran compiler version.') + else: + k=match.groupdict() + conf.env['FC_VERSION']=(k['major'],k['minor']) + +def configure(conf): + conf.find_nfort() + conf.find_program('nar',var='AR') + conf.add_os_flags('ARFLAGS') + if not conf.env.ARFLAGS: + conf.env.ARFLAGS=['rcs'] + conf.fc_flags() + conf.fc_add_flags() + conf.nfort_flags() 
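These get_*_version helpers all follow one pattern: run the compiler with a version flag, then search stdout and fall back to stderr. A standalone illustration of the nfort variant (the banner string is invented for the example):

import re

version_re = re.compile(r"nfort\s*\(NFORT\)\s*(?P<major>\d+)\.(?P<minor>\d+)\.", re.I).search
banner = 'nfort (NFORT) 3.0.4 for Vector Engine'  # hypothetical 'nfort --version' output
match = version_re(banner)
if match:
	print(match.group('major'), match.group('minor'))  # prints: 3 0

diff --git a/third_party/waf/waflib/extras/fc_open64.py b/third_party/waf/waflib/extras/fc_open64.py new file mode 100644 index 0000000..413719f --- /dev/null +++ b/third_party/waf/waflib/extras/fc_open64.py @@ -0,0 +1,58 @@ +#!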
/usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +import re +from waflib import Utils +from waflib.Tools import fc,fc_config,fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].insert(0, 'fc_open64') + +@conf +def find_openf95(conf): + """Find the Open64 Fortran Compiler (will look in the environment variable 'FC')""" + + fc = conf.find_program(['openf95', 'openf90'], var='FC') + conf.get_open64_version(fc) + conf.env.FC_NAME = 'OPEN64' + conf.env.FC_MOD_CAPITALIZATION = 'UPPER.mod' + +@conf +def openf95_flags(conf): + v = conf.env + v['FCFLAGS_DEBUG'] = ['-fullwarn'] + +@conf +def openf95_modifier_platform(conf): + dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() + openf95_modifier_func = getattr(conf, 'openf95_modifier_' + dest_os, None) + if openf95_modifier_func: + openf95_modifier_func() + +@conf +def get_open64_version(conf, fc): + """Get the Open64 compiler version""" + + version_re = re.compile(r"Open64 Compiler Suite: *Version *(?P<major>\d*)\.(?P<minor>\d*)", re.I).search + cmd = fc + ['-version'] + + out, err = fc_config.getoutput(conf,cmd,stdin=False) + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + conf.fatal('Could not determine the Open64 version.') + k = match.groupdict() + conf.env['FC_VERSION'] = (k['major'], k['minor']) + +def configure(conf): + conf.find_openf95() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.openf95_flags() + conf.openf95_modifier_platform() + diff --git a/third_party/waf/waflib/extras/fc_pgfortran.py b/third_party/waf/waflib/extras/fc_pgfortran.py new file mode 100644 index 0000000..afb2817 --- /dev/null +++ b/third_party/waf/waflib/extras/fc_pgfortran.py @@ -0,0 +1,68 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +import re +from waflib.Tools import fc, fc_config, fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].append('fc_pgfortran') + +@conf +def find_pgfortran(conf): + """Find the PGI fortran compiler (will look in the environment variable 'FC')""" + fc = conf.find_program(['pgfortran', 'pgf95', 'pgf90'], var='FC') + conf.get_pgfortran_version(fc) + conf.env.FC_NAME = 'PGFC' + +@conf +def pgfortran_flags(conf): + v = conf.env + v['FCFLAGS_fcshlib'] = ['-shared'] + v['FCFLAGS_DEBUG'] = ['-Minform=inform', '-Mstandard'] # why not + v['FCSTLIB_MARKER'] = '-Bstatic' + v['FCSHLIB_MARKER'] = '-Bdynamic' + v['SONAME_ST'] = '-soname %s' + +@conf +def get_pgfortran_version(conf,fc): + version_re = re.compile(r"The Portland Group", re.I).search + cmd = fc + ['-V'] + out,err = fc_config.getoutput(conf, cmd, stdin=False) + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + conf.fatal('Could not verify PGI signature') + cmd = fc + ['-help=variable'] + out,err = fc_config.getoutput(conf, cmd, stdin=False) + if out.find('COMPVER')<0: + conf.fatal('Could not determine the compiler type') + k = {} + prevk = '' + out = out.splitlines() + for line in out: + lst = line.partition('=') + if lst[1] == '=': + key = lst[0].rstrip() + if key == '': + key = prevk + val = lst[2].rstrip() + k[key] = val + else: + prevk = line.partition(' ')[0] + def isD(var): + return var in k + def isT(var): + return var in k and k[var]!='0' + conf.env['FC_VERSION'] = (k['COMPVER'].split('.')) + +def configure(conf): + conf.find_pgfortran() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.pgfortran_flags() + diff --git a/third_party/waf/waflib/extras/fc_solstudio.py b/third_party/waf/waflib/extras/fc_solstudio.py new file mode 100644 index 0000000..53766df --- /dev/null +++ b/third_party/waf/waflib/extras/fc_solstudio.py @@ -0,0 +1,62 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +import re +from waflib import Utils +from waflib.Tools import fc,fc_config,fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['linux'].append('fc_solstudio') + +@conf +def find_solstudio(conf): + """Find the Solaris Studio compiler (will look in the environment variable 'FC')""" + + fc = conf.find_program(['sunf95', 'f95', 'sunf90', 'f90'], var='FC') + conf.get_solstudio_version(fc) + conf.env.FC_NAME = 'SOL' + +@conf +def solstudio_flags(conf): + v = conf.env + v['FCFLAGS_fcshlib'] = ['-Kpic'] + v['FCFLAGS_DEBUG'] = ['-w3'] + v['LINKFLAGS_fcshlib'] = ['-G'] + v['FCSTLIB_MARKER'] = '-Bstatic' + v['FCSHLIB_MARKER'] = '-Bdynamic' + v['SONAME_ST'] = '-h %s' + +@conf +def solstudio_modifier_platform(conf): + dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() + solstudio_modifier_func = getattr(conf, 'solstudio_modifier_' + dest_os, None) + if solstudio_modifier_func: + solstudio_modifier_func() + +@conf +def get_solstudio_version(conf, fc): + """Get the compiler version""" + + version_re = re.compile(r"Sun Fortran 95 *(?P<major>\d*)\.(?P<minor>\d*)", re.I).search + cmd = fc + ['-V'] + + out, err = fc_config.getoutput(conf,cmd,stdin=False) + if out: + match = version_re(out) + else: + match = version_re(err) + if not match: + conf.fatal('Could not determine the Sun Studio Fortran version.') + k = match.groupdict() + conf.env['FC_VERSION'] = (k['major'], k['minor']) + +def configure(conf): + conf.find_solstudio() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.solstudio_flags() + conf.solstudio_modifier_platform() + diff --git a/third_party/waf/waflib/extras/fc_xlf.py b/third_party/waf/waflib/extras/fc_xlf.py new file mode 100644 index 0000000..5a3da03 --- /dev/null +++ b/third_party/waf/waflib/extras/fc_xlf.py @@ -0,0 +1,63 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# harald at klimachs.de + +import re +from waflib import Utils,Errors +from waflib.Tools import fc,fc_config,fc_scan +from waflib.Configure import conf + +from waflib.Tools.compiler_fc import fc_compiler +fc_compiler['aix'].insert(0, 'fc_xlf') + +@conf +def find_xlf(conf): + """Find the xlf program (will look in the environment variable 'FC')""" + + fc = conf.find_program(['xlf2003_r', 'xlf2003', 'xlf95_r', 'xlf95', 'xlf90_r', 'xlf90', 'xlf_r', 'xlf'], var='FC') + conf.get_xlf_version(fc) + conf.env.FC_NAME='XLF' + +@conf +def xlf_flags(conf): + v = conf.env + v['FCDEFINES_ST'] = '-WF,-D%s' + v['FCFLAGS_fcshlib'] = ['-qpic=small'] + v['FCFLAGS_DEBUG'] = ['-qhalt=w'] + v['LINKFLAGS_fcshlib'] = ['-Wl,-shared'] + +@conf +def xlf_modifier_platform(conf): + dest_os = conf.env['DEST_OS'] or Utils.unversioned_sys_platform() + xlf_modifier_func = getattr(conf, 'xlf_modifier_' + dest_os, None) + if xlf_modifier_func: + xlf_modifier_func() + +@conf +def get_xlf_version(conf, fc): + """Get the compiler version""" + + cmd = fc + ['-qversion'] + try: + out, err = conf.cmd_and_log(cmd, output=0) + except Errors.WafError: + conf.fatal('Could not find xlf %r' % cmd) + + for v in (r"IBM XL Fortran.* V(?P<major>\d*)\.(?P<minor>\d*)",): + version_re = re.compile(v, re.I).search + match = version_re(out or err) + if match: + k = match.groupdict() + conf.env['FC_VERSION'] = (k['major'], k['minor']) + break + else: + conf.fatal('Could not determine the XLF version.') + +def configure(conf): + conf.find_xlf() + conf.find_ar() + conf.fc_flags() + conf.fc_add_flags() + conf.xlf_flags() + conf.xlf_modifier_platform() + diff --git a/third_party/waf/waflib/extras/file_to_object.py b/third_party/waf/waflib/extras/file_to_object.py new file mode 100644 index 0000000..13d2aef --- /dev/null +++ b/third_party/waf/waflib/extras/file_to_object.py @@ -0,0 +1,142 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Tool to embed files into objects + +__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2014" + +""" + +This tool allows embedding file contents in object files (.o). +It is not fully portable, and the embedded contents are reachable +only through various non-portable means. +The goal here is to provide a functional interface to the embedding +of file data in objects. +See the ``playground/embedded_resources`` example for a demonstration. + +Usage:: + + bld( + name='pipeline', + # ^ Reference this in use="..." for things using the generated code + features='file_to_object', + source='some.file', + # ^ Name of the file to embed in binary section. + ) + +Known issues: + +- The destination is named like the source, with the extension renamed to .o, + e.g.
some.file -> some.o + +""" + +import os, sys +from waflib import Task, TaskGen, Errors + +def filename_c_escape(x): + return x.replace("\\", "\\\\") + +class file_to_object_s(Task.Task): + color = 'CYAN' + vars = ['DEST_CPU', 'DEST_BINFMT'] + + def run(self): + name = [] + for i, x in enumerate(self.inputs[0].name): + if x.isalnum(): + name.append(x) + else: + name.append('_') + file = self.inputs[0].abspath() + size = os.path.getsize(file) + if self.env.DEST_CPU in ('x86_64', 'ia', 'aarch64'): + unit = 'quad' + align = 8 + elif self.env.DEST_CPU in ('x86','arm', 'thumb', 'm68k'): + unit = 'long' + align = 4 + else: + raise Errors.WafError("Unsupported DEST_CPU, please report bug!") + + file = filename_c_escape(file) + name = "_binary_" + "".join(name) + rodata = ".section .rodata" + if self.env.DEST_BINFMT == "mac-o": + name = "_" + name + rodata = ".section __TEXT,__const" + + with open(self.outputs[0].abspath(), 'w') as f: + f.write(\ +""" + .global %(name)s_start + .global %(name)s_end + .global %(name)s_size + %(rodata)s +%(name)s_start: + .incbin "%(file)s" +%(name)s_end: + .align %(align)d +%(name)s_size: + .%(unit)s 0x%(size)x +""" % locals()) + +class file_to_object_c(Task.Task): + color = 'CYAN' + def run(self): + name = [] + for i, x in enumerate(self.inputs[0].name): + if x.isalnum(): + name.append(x) + else: + name.append('_') + file = self.inputs[0].abspath() + size = os.path.getsize(file) + + name = "_binary_" + "".join(name) + + def char_to_num(ch): + if sys.version_info[0] < 3: + return ord(ch) + return ch + + data = self.inputs[0].read('rb') + lines, line = [], [] + for idx_byte, byte in enumerate(data): + line.append(byte) + if len(line) > 15 or idx_byte == size-1: + lines.append(", ".join(("0x%02x" % char_to_num(x)) for x in line)) + line = [] + data = ",\n ".join(lines) + + self.outputs[0].write(\ +""" +unsigned long %(name)s_size = %(size)dL; +char const %(name)s_start[] = { + %(data)s +}; +char const %(name)s_end[] = {}; +""" % locals()) + +@TaskGen.feature('file_to_object') +@TaskGen.before_method('process_source') +def tg_file_to_object(self): + bld = self.bld + sources = self.to_nodes(self.source) + targets = [] + for src in sources: + if bld.env.F2O_METHOD == ["asm"]: + tgt = src.parent.find_or_declare(src.name + '.f2o.s') + tsk = self.create_task('file_to_object_s', src, tgt) + tsk.cwd = src.parent.abspath() # verify + else: + tgt = src.parent.find_or_declare(src.name + '.f2o.c') + tsk = self.create_task('file_to_object_c', src, tgt) + tsk.cwd = src.parent.abspath() # verify + targets.append(tgt) + self.source = targets + +def configure(conf): + conf.load('gas') + conf.env.F2O_METHOD = ["c"] + diff --git a/third_party/waf/waflib/extras/fluid.py b/third_party/waf/waflib/extras/fluid.py new file mode 100644 index 0000000..4814a35 --- /dev/null +++ b/third_party/waf/waflib/extras/fluid.py @@ -0,0 +1,30 @@ +#!/usr/bin/python +# encoding: utf-8 +# Grygoriy Fuchedzhy 2009 + +""" +Compile fluid files (fltk graphic library). Use the 'fluid' feature in conjunction with the 'cxx' feature. 
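The file_to_object tool above swaps the task generator's source for a generated assembly or C stub. A minimal wscript sketch, mirroring the usage notes in the docstring (file and target names are hypothetical, and the tooldir is assumed to match the paths in this tree)::

    def options(opt):
        opt.load('compiler_c')

    def configure(conf):
        conf.load('compiler_c')
        # tooldir is an assumption; point it at wherever the extras live
        conf.load('file_to_object', tooldir='third_party/waf/waflib/extras')

    def build(bld):
        # embeds data.bin; per the name-mangling above, the object exports
        # _binary_data_bin_start, _binary_data_bin_end and _binary_data_bin_size
        bld(name='blob', features='file_to_object', source='data.bin')
        bld.program(source='main.c', target='app', use='blob')

Note that configure() in the tool sets F2O_METHOD to ["c"], so the C fallback is used unless a project switches it to ["asm"].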
+""" + +from waflib import Task +from waflib.TaskGen import extension + +class fluid(Task.Task): + color = 'BLUE' + ext_out = ['.h'] + run_str = '${FLUID} -c -o ${TGT[0].abspath()} -h ${TGT[1].abspath()} ${SRC}' + +@extension('.fl') +def process_fluid(self, node): + """add the .fl to the source list; the cxx file generated will be compiled when possible""" + cpp = node.change_ext('.cpp') + hpp = node.change_ext('.hpp') + self.create_task('fluid', node, [cpp, hpp]) + + if 'cxx' in self.features: + self.source.append(cpp) + +def configure(conf): + conf.find_program('fluid', var='FLUID') + conf.check_cfg(path='fltk-config', package='', args='--cxxflags --ldflags', uselib_store='FLTK', mandatory=True) + diff --git a/third_party/waf/waflib/extras/freeimage.py b/third_party/waf/waflib/extras/freeimage.py new file mode 100644 index 0000000..f27e525 --- /dev/null +++ b/third_party/waf/waflib/extras/freeimage.py @@ -0,0 +1,74 @@ +#!/usr/bin/env python +# encoding: utf-8 +# +# written by Sylvain Rouquette, 2011 + +''' +To add the freeimage tool to the waf file: +$ ./waf-light --tools=compat15,freeimage + or, if you have waf >= 1.6.2 +$ ./waf update --files=freeimage + +The wscript will look like: + +def options(opt): + opt.load('compiler_cxx freeimage') + +def configure(conf): + conf.load('compiler_cxx freeimage') + + # you can call check_freeimage with some parameters. + # It's optional on Linux, it's 'mandatory' on Windows if + # you didn't use --fi-path on the command-line + + # conf.check_freeimage(path='FreeImage/Dist', fip=True) + +def build(bld): + bld(source='main.cpp', target='app', use='FREEIMAGE') +''' + +from waflib import Utils +from waflib.Configure import conf + + +def options(opt): + opt.add_option('--fi-path', type='string', default='', dest='fi_path', + help='''path to the FreeImage directory \ + where the files are e.g. /FreeImage/Dist''') + opt.add_option('--fip', action='store_true', default=False, dest='fip', + help='link with FreeImagePlus') + opt.add_option('--fi-static', action='store_true', + default=False, dest='fi_static', + help="link as shared libraries") + + +@conf +def check_freeimage(self, path=None, fip=False): + self.start_msg('Checking FreeImage') + if not self.env['CXX']: + self.fatal('you must load compiler_cxx before loading freeimage') + prefix = self.options.fi_static and 'ST' or '' + platform = Utils.unversioned_sys_platform() + if platform == 'win32': + if not path: + self.fatal('you must specify the path to FreeImage. \ + use --fi-path=/FreeImage/Dist') + else: + self.env['INCLUDES_FREEIMAGE'] = path + self.env['%sLIBPATH_FREEIMAGE' % prefix] = path + libs = ['FreeImage'] + if self.options.fip: + libs.append('FreeImagePlus') + if platform == 'win32': + self.env['%sLIB_FREEIMAGE' % prefix] = libs + else: + self.env['%sLIB_FREEIMAGE' % prefix] = [i.lower() for i in libs] + self.end_msg('ok') + + +def configure(conf): + platform = Utils.unversioned_sys_platform() + if platform == 'win32' and not conf.options.fi_path: + return + conf.check_freeimage(conf.options.fi_path, conf.options.fip) + diff --git a/third_party/waf/waflib/extras/fsb.py b/third_party/waf/waflib/extras/fsb.py new file mode 100644 index 0000000..1b8f398 --- /dev/null +++ b/third_party/waf/waflib/extras/fsb.py @@ -0,0 +1,31 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011 (ita) + +""" +Fully sequential builds + +The previous tasks from task generators are re-processed, and this may lead to speed issues +Yet, if you are using this, speed is probably a minor concern +""" + +from waflib import Build + +def options(opt): + pass + +def configure(conf): + pass + +class FSBContext(Build.BuildContext): + def __call__(self, *k, **kw): + ret = Build.BuildContext.__call__(self, *k, **kw) + + # evaluate the results immediately + Build.BuildContext.compile(self) + + return ret + + def compile(self): + pass + diff --git a/third_party/waf/waflib/extras/fsc.py b/third_party/waf/waflib/extras/fsc.py new file mode 100644 index 0000000..c67e70b --- /dev/null +++ b/third_party/waf/waflib/extras/fsc.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011 (ita) + +""" +Experimental F# stuff + +FSC="mono /path/to/fsc.exe" waf configure build +""" + +from waflib import Utils, Task +from waflib.TaskGen import before_method, after_method, feature +from waflib.Tools import ccroot, cs + +ccroot.USELIB_VARS['fsc'] = set(['CSFLAGS', 'ASSEMBLIES', 'RESOURCES']) + +@feature('fs') +@before_method('process_source') +def apply_fsc(self): + cs_nodes = [] + no_nodes = [] + for x in self.to_nodes(self.source): + if x.name.endswith('.fs'): + cs_nodes.append(x) + else: + no_nodes.append(x) + self.source = no_nodes + + bintype = getattr(self, 'type', self.gen.endswith('.dll') and 'library' or 'exe') + self.cs_task = tsk = self.create_task('fsc', cs_nodes, self.path.find_or_declare(self.gen)) + tsk.env.CSTYPE = '/target:%s' % bintype + tsk.env.OUT = '/out:%s' % tsk.outputs[0].abspath() + + inst_to = getattr(self, 'install_path', bintype=='exe' and '${BINDIR}' or '${LIBDIR}') + if inst_to: + # note: we are making a copy, so the files added to cs_task.outputs won't be installed automatically + mod = getattr(self, 'chmod', bintype=='exe' and Utils.O755 or Utils.O644) + self.install_task = self.add_install_files(install_to=inst_to, install_from=self.cs_task.outputs[:], chmod=mod) + +feature('fs')(cs.use_cs) +after_method('apply_fsc')(cs.use_cs) + +feature('fs')(cs.debug_cs) +after_method('apply_fsc', 'use_cs')(cs.debug_cs) + +class fsc(Task.Task): + """ + Compile F# files + """ + color = 'YELLOW' + run_str = '${FSC} ${CSTYPE} ${CSFLAGS} ${ASS_ST:ASSEMBLIES} ${RES_ST:RESOURCES} ${OUT} ${SRC}' + +def configure(conf): + """ + Find a F# compiler, set the variable FSC for the compiler and FS_NAME (mono or fsc) + """ + conf.find_program(['fsc.exe', 'fsharpc'], var='FSC') + conf.env.ASS_ST = '/r:%s' + conf.env.RES_ST = '/resource:%s' + + conf.env.FS_NAME = 'fsc' + if str(conf.env.FSC).lower().find('fsharpc') > -1: + conf.env.FS_NAME = 'mono' + diff --git a/third_party/waf/waflib/extras/gccdeps.py b/third_party/waf/waflib/extras/gccdeps.py new file mode 100644 index 0000000..5d2f0dd --- /dev/null +++ b/third_party/waf/waflib/extras/gccdeps.py @@ -0,0 +1,244 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2008-2010 (ita) + +""" +Execute the tasks with gcc -MD, read the dependencies from the .d file +and prepare the dependency calculation for the next run. +This affects the cxx class, so make sure to load Qt5 after this tool. 
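For the experimental F# tool (fsc.py) above, a hedged build sketch; the `gen` attribute names the output assembly, and its extension selects library versus executable in apply_fsc (source file names here are hypothetical)::

    def configure(conf):
        # FSC may be overridden from the environment,
        # e.g. FSC="mono /path/to/fsc.exe" waf configure
        conf.load('fsc', tooldir='third_party/waf/waflib/extras')

    def build(bld):
        bld(features='fs', source='Program.fs Utils.fs', gen='Program.exe')
        bld(features='fs', source='Lib.fs', gen='Lib.dll')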
+ +Usage:: + + def options(opt): + opt.load('compiler_cxx') + def configure(conf): + conf.load('compiler_cxx gccdeps') +""" + +import os, re, threading +from waflib import Task, Logs, Utils, Errors +from waflib.Tools import asm, c, c_preproc, cxx +from waflib.TaskGen import before_method, feature + +lock = threading.Lock() + +gccdeps_flags = ['-MD'] +if not c_preproc.go_absolute: + gccdeps_flags = ['-MMD'] + +# Third-party tools are allowed to add extra names in here with append() +supported_compilers = ['gas', 'gcc', 'icc', 'clang'] + +re_o = re.compile(r"\.o$") +re_splitter = re.compile(r'(?<!\\)\s+') # split by space, except when spaces are escaped + +def remove_makefile_rule_lhs(line): + # Splitting on a plain colon would accidentally match inside a + # Windows absolute-path filename, so we must search for a colon + # followed by whitespace to find the divider between LHS and RHS + # of the Makefile rule. + rulesep = ': ' + + sep_idx = line.find(rulesep) + if sep_idx >= 0: + return line[sep_idx + 2:] + else: + return line + +def path_to_node(base_node, path, cached_nodes): + # Take the base node and the path and return a node + # Results are cached because searching the node tree is expensive + # The following code is executed by threads, it is not safe, so a lock is needed... + if getattr(path, '__hash__'): + node_lookup_key = (base_node, path) + else: + # Not hashable, assume it is a list and join into a string + node_lookup_key = (base_node, os.path.sep.join(path)) + + try: + node = cached_nodes[node_lookup_key] + except KeyError: + # retry with lock on cache miss + with lock: + try: + node = cached_nodes[node_lookup_key] + except KeyError: + node = cached_nodes[node_lookup_key] = base_node.find_resource(path) + + return node + +def post_run(self): + if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: + return super(self.derived_gccdeps, self).post_run() + + deps_filename = self.outputs[0].abspath() + deps_filename = re_o.sub('.d', deps_filename) + try: + deps_txt = Utils.readf(deps_filename) + except EnvironmentError: + Logs.error('Could not find a .d dependency file, are cflags/cxxflags overwritten?') + raise + + # Compilers have the choice to either output the file's dependencies + # as one large Makefile rule: + # + # /path/to/file.o: /path/to/dep1.h \ + # /path/to/dep2.h \ + # /path/to/dep3.h \ + # ... + # + # or as many individual rules: + # + # /path/to/file.o: /path/to/dep1.h + # /path/to/file.o: /path/to/dep2.h + # /path/to/file.o: /path/to/dep3.h + # ... + # + # So the first step is to sanitize the input by stripping out the left- + # hand side of all these lines. After that, whatever remains are the + # implicit dependencies of task.outputs[0] + deps_txt = '\n'.join([remove_makefile_rule_lhs(line) for line in deps_txt.splitlines()]) + + # Now join all the lines together + deps_txt = deps_txt.replace('\\\n', '') + + dep_paths = deps_txt.strip() + dep_paths = [x.replace('\\ ', ' ') for x in re_splitter.split(dep_paths) if x] + + resolved_nodes = [] + unresolved_names = [] + bld = self.generator.bld + + # Dynamically bind to the cache + try: + cached_nodes = bld.cached_nodes + except AttributeError: + cached_nodes = bld.cached_nodes = {} + + for path in dep_paths: + + node = None + if os.path.isabs(path): + node = path_to_node(bld.root, path, cached_nodes) + else: + # TODO waf 1.9 - single cwd value + base_node = getattr(bld, 'cwdx', bld.bldnode) + # when calling find_resource, make sure the path does not contain '..' 
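+ # a leading '..' moves base_node one level up; any other '..' cancels + # against the path component just before it, so find_resource below + # never sees '..' segments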
+ path = [k for k in Utils.split_path(path) if k and k != '.'] + while '..' in path: + idx = path.index('..') + if idx == 0: + path = path[1:] + base_node = base_node.parent + else: + del path[idx] + del path[idx-1] + + node = path_to_node(base_node, path, cached_nodes) + + if not node: + raise ValueError('could not find %r for %r' % (path, self)) + + if id(node) == id(self.inputs[0]): + # ignore the source file, it is already in the dependencies + # this way, successful config tests may be retrieved from the cache + continue + + resolved_nodes.append(node) + + Logs.debug('deps: gccdeps for %s returned %s', self, resolved_nodes) + + bld.node_deps[self.uid()] = resolved_nodes + bld.raw_deps[self.uid()] = unresolved_names + + try: + del self.cache_sig + except AttributeError: + pass + + Task.Task.post_run(self) + +def scan(self): + if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: + return super(self.derived_gccdeps, self).scan() + + resolved_nodes = self.generator.bld.node_deps.get(self.uid(), []) + unresolved_names = [] + return (resolved_nodes, unresolved_names) + +def sig_implicit_deps(self): + if not self.__class__.__name__ in self.env.ENABLE_GCCDEPS: + return super(self.derived_gccdeps, self).sig_implicit_deps() + bld = self.generator.bld + + try: + return self.compute_sig_implicit_deps() + except Errors.TaskNotReady: + raise ValueError("Please specify the build order precisely with gccdeps (asm/c/c++ tasks)") + except EnvironmentError: + # If a file is renamed, assume the dependencies are stale and must be recalculated + for x in bld.node_deps.get(self.uid(), []): + if not x.is_bld() and not x.exists(): + try: + del x.parent.children[x.name] + except KeyError: + pass + + key = self.uid() + bld.node_deps[key] = [] + bld.raw_deps[key] = [] + return Utils.SIG_NIL + +def wrap_compiled_task(classname): + derived_class = type(classname, (Task.classes[classname],), {}) + derived_class.derived_gccdeps = derived_class + derived_class.post_run = post_run + derived_class.scan = scan + derived_class.sig_implicit_deps = sig_implicit_deps + +for k in ('asm', 'c', 'cxx'): + if k in Task.classes: + wrap_compiled_task(k) + +@before_method('process_source') +@feature('force_gccdeps') +def force_gccdeps(self): + self.env.ENABLE_GCCDEPS = ['asm', 'c', 'cxx'] + +def configure(conf): + # in case someone provides a --enable-gccdeps command-line option + if not getattr(conf.options, 'enable_gccdeps', True): + return + + global gccdeps_flags + flags = conf.env.GCCDEPS_FLAGS or gccdeps_flags + if conf.env.ASM_NAME in supported_compilers: + try: + conf.check(fragment='', features='asm force_gccdeps', asflags=flags, compile_filename='test.S', msg='Checking for asm flags %r' % ''.join(flags)) + except Errors.ConfigurationError: + pass + else: + conf.env.append_value('ASFLAGS', flags) + conf.env.append_unique('ENABLE_GCCDEPS', 'asm') + + if conf.env.CC_NAME in supported_compilers: + try: + conf.check(fragment='int main() { return 0; }', features='c force_gccdeps', cflags=flags, msg='Checking for c flags %r' % ''.join(flags)) + except Errors.ConfigurationError: + pass + else: + conf.env.append_value('CFLAGS', flags) + conf.env.append_unique('ENABLE_GCCDEPS', 'c') + + if conf.env.CXX_NAME in supported_compilers: + try: + conf.check(fragment='int main() { return 0; }', features='cxx force_gccdeps', cxxflags=flags, msg='Checking for cxx flags %r' % ''.join(flags)) + except Errors.ConfigurationError: + pass + else: + conf.env.append_value('CXXFLAGS', flags) + conf.env.append_unique('ENABLE_GCCDEPS', 'cxx') + 
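A configuration sketch for gccdeps, drawing only on what the code above reads: GCCDEPS_FLAGS is honoured if set before the tool's configure runs, and the force_gccdeps feature defined above can enable dependency processing for an individual task generator. Project names are illustrative, and the tooldir is an assumption::

    def options(opt):
        opt.load('compiler_cxx')

    def configure(conf):
        conf.load('compiler_cxx')
        # optional: override the default -MMD/-MD before loading the tool
        conf.env.GCCDEPS_FLAGS = ['-MMD']
        conf.load('gccdeps', tooldir='third_party/waf/waflib/extras')

    def build(bld):
        bld.program(source='main.cpp', target='app')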
+def options(opt): + raise ValueError('Do not load gccdeps options') + diff --git a/third_party/waf/waflib/extras/gdbus.py b/third_party/waf/waflib/extras/gdbus.py new file mode 100644 index 0000000..0e0476e --- /dev/null +++ b/third_party/waf/waflib/extras/gdbus.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Copyright Garmin International or its subsidiaries, 2018 +# +# Heavily based on dbus.py + +""" +Compiles dbus files with **gdbus-codegen** +Typical usage:: + def options(opt): + opt.load('compiler_c gdbus') + def configure(conf): + conf.load('compiler_c gdbus') + def build(bld): + tg = bld.program( + includes = '.', + source = bld.path.ant_glob('*.c'), + target = 'gnome-hello') + tg.add_gdbus_file('test.xml', 'com.example.example.', 'Example') +""" + +from waflib import Task, Errors, Utils +from waflib.TaskGen import taskgen_method, before_method + +@taskgen_method +def add_gdbus_file(self, filename, prefix, namespace, export=False): + """ + Adds a dbus file to the list of dbus files to process. Store them in the attribute *dbus_lst*. + :param filename: xml file to compile + :type filename: string + :param prefix: interface prefix (--interface-prefix=prefix) + :type prefix: string + :param mode: C namespace (--c-namespace=namespace) + :type mode: string + :param export: Export Headers? + :type export: boolean + """ + if not hasattr(self, 'gdbus_lst'): + self.gdbus_lst = [] + if not 'process_gdbus' in self.meths: + self.meths.append('process_gdbus') + self.gdbus_lst.append([filename, prefix, namespace, export]) + +@before_method('process_source') +def process_gdbus(self): + """ + Processes the dbus files stored in the attribute *gdbus_lst* to create :py:class:`gdbus_binding_tool` instances. + """ + output_node = self.path.get_bld().make_node(['gdbus', self.get_name()]) + sources = [] + + for filename, prefix, namespace, export in getattr(self, 'gdbus_lst', []): + node = self.path.find_resource(filename) + if not node: + raise Errors.WafError('file not found ' + filename) + c_file = output_node.find_or_declare(node.change_ext('.c').name) + h_file = output_node.find_or_declare(node.change_ext('.h').name) + tsk = self.create_task('gdbus_binding_tool', node, [c_file, h_file]) + tsk.cwd = output_node.abspath() + + tsk.env.GDBUS_CODEGEN_INTERFACE_PREFIX = prefix + tsk.env.GDBUS_CODEGEN_NAMESPACE = namespace + tsk.env.GDBUS_CODEGEN_OUTPUT = node.change_ext('').name + sources.append(c_file) + + if sources: + output_node.mkdir() + self.source = Utils.to_list(self.source) + sources + self.includes = [output_node] + self.to_incnodes(getattr(self, 'includes', [])) + if export: + self.export_includes = [output_node] + self.to_incnodes(getattr(self, 'export_includes', [])) + +class gdbus_binding_tool(Task.Task): + """ + Compiles a dbus file + """ + color = 'BLUE' + ext_out = ['.h', '.c'] + run_str = '${GDBUS_CODEGEN} --interface-prefix ${GDBUS_CODEGEN_INTERFACE_PREFIX} --generate-c-code ${GDBUS_CODEGEN_OUTPUT} --c-namespace ${GDBUS_CODEGEN_NAMESPACE} --c-generate-object-manager ${SRC[0].abspath()}' + shell = True + +def configure(conf): + """ + Detects the program gdbus-codegen and sets ``conf.env.GDBUS_CODEGEN`` + """ + conf.find_program('gdbus-codegen', var='GDBUS_CODEGEN') + diff --git a/third_party/waf/waflib/extras/genpybind.py b/third_party/waf/waflib/extras/genpybind.py new file mode 100644 index 0000000..ac206ee --- /dev/null +++ b/third_party/waf/waflib/extras/genpybind.py @@ -0,0 +1,194 @@ +import os +import pipes +import subprocess +import sys + +from waflib import Logs, 
Task, Context +from waflib.Tools.c_preproc import scan as scan_impl +# ^-- Note: waflib.extras.gccdeps.scan does not work for us, +# due to its current implementation: +# The -MD flag is injected into the {C,CXX}FLAGS environment variable and +# dependencies are read out in a separate step after compiling by reading +# the .d file saved alongside the object file. +# As the genpybind task refers to a header file that is never compiled itself, +# gccdeps will not be able to extract the list of dependencies. + +from waflib.TaskGen import feature, before_method + + +def join_args(args): + return " ".join(pipes.quote(arg) for arg in args) + + +def configure(cfg): + cfg.load("compiler_cxx") + cfg.load("python") + cfg.check_python_version(minver=(2, 7)) + if not cfg.env.LLVM_CONFIG: + cfg.find_program("llvm-config", var="LLVM_CONFIG") + if not cfg.env.GENPYBIND: + cfg.find_program("genpybind", var="GENPYBIND") + + # find the clang resource dir for builtin headers + cfg.env.GENPYBIND_RESOURCE_DIR = os.path.join( + cfg.cmd_and_log(cfg.env.LLVM_CONFIG + ["--libdir"]).strip(), + "clang", + cfg.cmd_and_log(cfg.env.LLVM_CONFIG + ["--version"]).strip()) + if os.path.exists(cfg.env.GENPYBIND_RESOURCE_DIR): + cfg.msg("Checking clang resource dir", cfg.env.GENPYBIND_RESOURCE_DIR) + else: + cfg.fatal("Clang resource dir not found") + + +@feature("genpybind") +@before_method("process_source") +def generate_genpybind_source(self): + """ + Run genpybind on the headers provided in `source` and compile/link the + generated code instead. This works by generating the code on the fly and + swapping the source node before `process_source` is run. + """ + # name of module defaults to name of target + module = getattr(self, "module", self.target) + + # create temporary source file in build directory to hold generated code + out = "genpybind-%s.%d.cpp" % (module, self.idx) + out = self.path.get_bld().find_or_declare(out) + + task = self.create_task("genpybind", self.to_nodes(self.source), out) + # used to detect whether CFLAGS or CXXFLAGS should be passed to genpybind + task.features = self.features + task.module = module + # can be used to select definitions to include in the current module + # (when header files are shared by more than one module) + task.genpybind_tags = self.to_list(getattr(self, "genpybind_tags", [])) + # additional include directories + task.includes = self.to_list(getattr(self, "includes", [])) + task.genpybind = self.env.GENPYBIND + + # Tell waf to compile/link the generated code instead of the headers + # originally passed-in via the `source` parameter. (see `process_source`) + self.source = [out] + + +class genpybind(Task.Task): # pylint: disable=invalid-name + """ + Runs genpybind on headers provided as input to this task. + Generated code will be written to the first (and only) output node.
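A build sketch for the genpybind feature defined above; `module`, `genpybind_tags` and the include handling correspond to the attributes read in generate_genpybind_source, and the tool's configure takes care of locating llvm-config and genpybind. Target, header and tag names are hypothetical::

    def configure(conf):
        conf.load('genpybind', tooldir='third_party/waf/waflib/extras')

    def build(bld):
        bld(target='pyfoo',
            features='cxx cxxshlib pyext genpybind',
            source='foo.h',            # header analyzed by genpybind
            module='pyfoo',            # defaults to target when omitted
            genpybind_tags='pyfoo',    # optional, for headers shared between modules
            includes='.')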
+ """ + quiet = True + color = "PINK" + scan = scan_impl + + @staticmethod + def keyword(): + return "Analyzing" + + def run(self): + if not self.inputs: + return + + args = self.find_genpybind() + self._arguments( + resource_dir=self.env.GENPYBIND_RESOURCE_DIR) + + output = self.run_genpybind(args) + + # For debugging / log output + pasteable_command = join_args(args) + + # write generated code to file in build directory + # (will be compiled during process_source stage) + (output_node,) = self.outputs + output_node.write("// {}\n{}\n".format( + pasteable_command.replace("\n", "\n// "), output)) + + def find_genpybind(self): + return self.genpybind + + def run_genpybind(self, args): + bld = self.generator.bld + + kwargs = dict(cwd=bld.variant_dir) + if hasattr(bld, "log_command"): + bld.log_command(args, kwargs) + else: + Logs.debug("runner: {!r}".format(args)) + proc = subprocess.Popen( + args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs) + stdout, stderr = proc.communicate() + + if not isinstance(stdout, str): + stdout = stdout.decode(sys.stdout.encoding, errors="replace") + if not isinstance(stderr, str): + stderr = stderr.decode(sys.stderr.encoding, errors="replace") + + if proc.returncode != 0: + bld.fatal( + "genpybind returned {code} during the following call:" + "\n{command}\n\n{stdout}\n\n{stderr}".format( + code=proc.returncode, + command=join_args(args), + stdout=stdout, + stderr=stderr, + )) + + if stderr.strip(): + Logs.debug("non-fatal warnings during genpybind run:\n{}".format(stderr)) + + return stdout + + def _include_paths(self): + return self.generator.to_incnodes(self.includes + self.env.INCLUDES) + + def _inputs_as_relative_includes(self): + include_paths = self._include_paths() + relative_includes = [] + for node in self.inputs: + for inc in include_paths: + if node.is_child_of(inc): + relative_includes.append(node.path_from(inc)) + break + else: + self.generator.bld.fatal("could not resolve {}".format(node)) + return relative_includes + + def _arguments(self, genpybind_parse=None, resource_dir=None): + args = [] + relative_includes = self._inputs_as_relative_includes() + is_cxx = "cxx" in self.features + + # options for genpybind + args.extend(["--genpybind-module", self.module]) + if self.genpybind_tags: + args.extend(["--genpybind-tag"] + self.genpybind_tags) + if relative_includes: + args.extend(["--genpybind-include"] + relative_includes) + if genpybind_parse: + args.extend(["--genpybind-parse", genpybind_parse]) + + args.append("--") + + # headers to be processed by genpybind + args.extend(node.abspath() for node in self.inputs) + + args.append("--") + + # options for clang/genpybind-parse + args.append("-D__GENPYBIND__") + args.append("-xc++" if is_cxx else "-xc") + has_std_argument = False + for flag in self.env["CXXFLAGS" if is_cxx else "CFLAGS"]: + flag = flag.replace("-std=gnu", "-std=c") + if flag.startswith("-std=c"): + has_std_argument = True + args.append(flag) + if not has_std_argument: + args.append("-std=c++14") + args.extend("-I{}".format(n.abspath()) for n in self._include_paths()) + args.extend("-D{}".format(p) for p in self.env.DEFINES) + + # point to clang resource dir, if specified + if resource_dir: + args.append("-resource-dir={}".format(resource_dir)) + + return args diff --git a/third_party/waf/waflib/extras/gob2.py b/third_party/waf/waflib/extras/gob2.py new file mode 100644 index 0000000..b4fa3b9 --- /dev/null +++ b/third_party/waf/waflib/extras/gob2.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Ali Sabil, 
2007 + +from waflib import TaskGen + +TaskGen.declare_chain( + name = 'gob2', + rule = '${GOB2} -o ${TGT[0].bld_dir()} ${GOB2FLAGS} ${SRC}', + ext_in = '.gob', + ext_out = '.c' +) + +def configure(conf): + conf.find_program('gob2', var='GOB2') + conf.env['GOB2FLAGS'] = '' + diff --git a/third_party/waf/waflib/extras/halide.py b/third_party/waf/waflib/extras/halide.py new file mode 100644 index 0000000..6078e38 --- /dev/null +++ b/third_party/waf/waflib/extras/halide.py @@ -0,0 +1,151 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Halide code generation tool + +__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2014" + +""" + +Tool to run `Halide <http://halide-lang.org>`_ code generators. + +Usage:: + + bld( + name='pipeline', + # ^ Reference this in use="..." for things using the generated code + #target=['pipeline.o', 'pipeline.h'] + # ^ by default, name.{o,h} is added, but you can set the outputs here + features='halide', + halide_env="HL_TRACE=1 HL_TARGET=host-opencl-gpu_debug", + # ^ Environment passed to the generator, + # can be a dict, k/v list, or string. + args=[], + # ^ Command-line arguments to the generator (optional), + # eg. to give parameters to the scheduling + source='pipeline_gen', + # ^ Name of the source executable + ) + + +Known issues: + + +- Currently only supports Linux (no ".exe") + +- Doesn't rerun on input modification when input is part of a build + chain, and has been modified externally. + +""" + +import os +from waflib import Task, Utils, Options, TaskGen, Errors + +class run_halide_gen(Task.Task): + color = 'CYAN' + vars = ['HALIDE_ENV', 'HALIDE_ARGS'] + run_str = "${SRC[0].abspath()} ${HALIDE_ARGS}" + def __str__(self): + stuff = "halide" + stuff += ("[%s]" % (",".join( + ('%s=%s' % (k,v)) for k, v in sorted(self.env.env.items())))) + return Task.Task.__str__(self).replace(self.__class__.__name__, + stuff) + +@TaskGen.feature('halide') +@TaskGen.before_method('process_source') +def halide(self): + Utils.def_attrs(self, + args=[], + halide_env={}, + ) + + bld = self.bld + + env = self.halide_env + try: + if isinstance(env, str): + env = dict(x.split('=') for x in env.split()) + elif isinstance(env, list): + env = dict(x.split('=') for x in env) + assert isinstance(env, dict) + except Exception as e: + if not isinstance(e, ValueError) \ + and not isinstance(e, AssertionError): + raise + raise Errors.WafError( + "halide_env must be under the form" \ + " {'HL_x':'a', 'HL_y':'b'}" \ + " or ['HL_x=y', 'HL_y=b']" \ + " or 'HL_x=y HL_y=b'") + + src = self.to_nodes(self.source) + assert len(src) == 1, "Only one source expected" + src = src[0] + + args = Utils.to_list(self.args) + + def change_ext(src, ext): + # Return a node with a new extension, in an appropriate folder + name = src.name + xpos = src.name.rfind('.') + if xpos == -1: + xpos = len(src.name) + newname = name[:xpos] + ext + if src.is_child_of(bld.bldnode): + node = src.get_src().parent.find_or_declare(newname) + else: + node = bld.bldnode.find_or_declare(newname) + return node + + def to_nodes(self, lst, path=None): + tmp = [] + path = path or self.path + find = path.find_or_declare + + if isinstance(lst, self.path.__class__): + lst = [lst] + + for x in Utils.to_list(lst): + if isinstance(x, str): + node = find(x) + else: + node = x + tmp.append(node) + return tmp + + tgt = to_nodes(self, self.target) + if not tgt: + tgt = [change_ext(src, '.o'), change_ext(src, '.h')] + cwd = tgt[0].parent.abspath() + task = self.create_task('run_halide_gen', 
src, tgt, cwd=cwd) + task.env.append_unique('HALIDE_ARGS', args) + if task.env.env == []: + task.env.env = {} + task.env.env.update(env) + task.env.HALIDE_ENV = " ".join(("%s=%s" % (k,v)) for (k,v) in sorted(env.items())) + task.env.HALIDE_ARGS = args + + try: + self.compiled_tasks.append(task) + except AttributeError: + self.compiled_tasks = [task] + self.source = [] + +def configure(conf): + if Options.options.halide_root is None: + conf.check_cfg(package='Halide', args='--cflags --libs') + else: + halide_root = Options.options.halide_root + conf.env.INCLUDES_HALIDE = [ os.path.join(halide_root, "include") ] + conf.env.LIBPATH_HALIDE = [ os.path.join(halide_root, "lib") ] + conf.env.LIB_HALIDE = ["Halide"] + + # You might want to add this, while upstream doesn't fix it + #conf.env.LIB_HALIDE += ['ncurses', 'dl', 'pthread'] + +def options(opt): + opt.add_option('--halide-root', + help="path to Halide include and lib files", + ) + diff --git a/third_party/waf/waflib/extras/haxe.py b/third_party/waf/waflib/extras/haxe.py new file mode 100644 index 0000000..4ff3745 --- /dev/null +++ b/third_party/waf/waflib/extras/haxe.py @@ -0,0 +1,154 @@ +import re + +from waflib import Utils, Task, Errors, Logs +from waflib.Configure import conf +from waflib.TaskGen import extension, taskgen_method + +HAXE_COMPILERS = { + 'JS': {'tgt': '--js', 'ext_out': ['.js']}, + 'LUA': {'tgt': '--lua', 'ext_out': ['.lua']}, + 'SWF': {'tgt': '--swf', 'ext_out': ['.swf']}, + 'NEKO': {'tgt': '--neko', 'ext_out': ['.n']}, + 'PHP': {'tgt': '--php', 'ext_out': ['.php']}, + 'CPP': {'tgt': '--cpp', 'ext_out': ['.h', '.cpp']}, + 'CPPIA': {'tgt': '--cppia', 'ext_out': ['.cppia']}, + 'CS': {'tgt': '--cs', 'ext_out': ['.cs']}, + 'JAVA': {'tgt': '--java', 'ext_out': ['.java']}, + 'JVM': {'tgt': '--jvm', 'ext_out': ['.jar']}, + 'PYTHON': {'tgt': '--python', 'ext_out': ['.py']}, + 'HL': {'tgt': '--hl', 'ext_out': ['.hl']}, + 'HLC': {'tgt': '--hl', 'ext_out': ['.h', '.c']}, +} + +@conf +def check_haxe_pkg(self, **kw): + self.find_program('haxelib') + libs = kw.get('libs') + if not libs or not (type(libs) == str or (type(libs) == list and all(isinstance(s, str) for s in libs))): + self.fatal('Specify correct libs value in ensure call') + return + fetch = kw.get('fetch') + if not fetch is None and not type(fetch) == bool: + self.fatal('Specify correct fetch value in ensure call') + + libs = [libs] if type(libs) == str else libs + halt = False + for lib in libs: + try: + self.start_msg('Checking for library %s' % lib) + output = self.cmd_and_log(self.env.HAXELIB + ['list', lib]) + except Errors.WafError: + self.end_msg(False) + self.fatal('Can\'t run haxelib list, ensuring halted') + return + + if lib in output: + self.end_msg(lib in output) + else: + if not fetch: + self.end_msg(False) + halt = True + continue + try: + status = self.exec_command(self.env.HAXELIB + ['install', lib]) + if status: + self.end_msg(False) + self.fatal('Can\'t get %s with haxelib, ensuring halted' % lib) + return + else: + self.end_msg('downloaded', color='YELLOW') + except Errors.WafError: + self.end_msg(False) + self.fatal('Can\'t run haxelib install, ensuring halted') + return + postfix = kw.get('uselib_store') or lib.upper() + self.env.append_unique('LIB_' + postfix, lib) + + if halt: + self.fatal('Can\'t find libraries in haxelib list, ensuring halted') + return + +class haxe(Task.Task): + vars = ['HAXE_VERSION', 'HAXE_FLAGS'] + ext_in = ['.hx'] + + def run(self): + cmd = self.env.HAXE + self.env.HAXE_FLAGS_DEFAULT + self.env.HAXE_FLAGS + return 
self.exec_command(cmd) + +for COMP in HAXE_COMPILERS: + # create runners for each compile target + type("haxe_" + COMP, (haxe,), {'ext_out': HAXE_COMPILERS[COMP]['ext_out']}) + +@taskgen_method +def init_haxe(self): + errmsg = '%s not found, specify correct value' + try: + compiler = HAXE_COMPILERS[self.compiler] + comp_tgt = compiler['tgt'] + comp_mod = '/main.c' if self.compiler == 'HLC' else '' + except (AttributeError, KeyError): + self.bld.fatal(errmsg % 'COMPILER' + ': ' + ', '.join(HAXE_COMPILERS.keys())) + return + + self.env.append_value( + 'HAXE_FLAGS', + [comp_tgt, self.path.get_bld().make_node(self.target + comp_mod).abspath()]) + if hasattr(self, 'use'): + if not (type(self.use) == str or type(self.use) == list): + self.bld.fatal(errmsg % 'USE') + return + self.use = [self.use] if type(self.use) == str else self.use + + for dep in self.use: + if self.env['LIB_' + dep]: + for lib in self.env['LIB_' + dep]: + self.env.append_value('HAXE_FLAGS', ['-lib', lib]) + + if hasattr(self, 'res'): + if not type(self.res) == str: + self.bld.fatal(errmsg % 'RES') + return + self.env.append_value('HAXE_FLAGS', ['-D', 'resourcesPath=%s' % self.res]) + +@extension('.hx') +def haxe_hook(self, node): + if len(self.source) > 1: + self.bld.fatal('Use separate task generators for multiple files') + return + + src = node + tgt = self.path.get_bld().find_or_declare(self.target) + + self.init_haxe() + self.create_task('haxe_' + self.compiler, src, tgt) + +@conf +def check_haxe(self, mini=None, maxi=None): + self.start_msg('Checking for haxe version') + try: + curr = re.search( + r'(\d+.?)+', + self.cmd_and_log(self.env.HAXE + ['-version'])).group() + except Errors.WafError: + self.end_msg(False) + self.fatal('Can\'t get haxe version') + return + + if mini and Utils.num2ver(curr) < Utils.num2ver(mini): + self.end_msg('wrong', color='RED') + self.fatal('%s is too old, need >= %s' % (curr, mini)) + return + if maxi and Utils.num2ver(curr) > Utils.num2ver(maxi): + self.end_msg('wrong', color='RED') + self.fatal('%s is too new, need <= %s' % (curr, maxi)) + return + self.end_msg(curr, color='GREEN') + self.env.HAXE_VERSION = curr + +def configure(self): + self.env.append_value( + 'HAXE_FLAGS_DEFAULT', + ['-D', 'no-compilation', '-cp', self.path.abspath()]) + Logs.warn('Default flags: %s' % ' '.join(self.env.HAXE_FLAGS_DEFAULT)) + self.find_program('haxe') diff --git a/third_party/waf/waflib/extras/javatest.py b/third_party/waf/waflib/extras/javatest.py new file mode 100755 index 0000000..76d40ed --- /dev/null +++ b/third_party/waf/waflib/extras/javatest.py @@ -0,0 +1,237 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Federico Pellegrin, 2019 (fedepell) + +""" +Provides Java Unit test support using :py:class:`waflib.Tools.waf_unit_test.utest` +task via the **javatest** feature. + +This gives the possibility to run unit test and have them integrated into the +standard waf unit test environment. It has been tested with TestNG and JUnit +but should be easily expandable to other frameworks given the flexibility of +ut_str provided by the standard waf unit test environment. + +The extra takes care also of managing non-java dependencies (ie. C/C++ libraries +using JNI or Python modules via JEP) and setting up the environment needed to run +them. + +Example usage: + +def options(opt): + opt.load('java waf_unit_test javatest') + +def configure(conf): + conf.load('java javatest') + +def build(bld): + + [ ... mainprog is built here ... 
] + + bld(features = 'javac javatest', + srcdir = 'test/', + outdir = 'test', + sourcepath = ['test'], + classpath = [ 'src' ], + basedir = 'test', + use = ['JAVATEST', 'mainprog'], # mainprog is the program being tested in src/ + ut_str = 'java -cp ${CLASSPATH} ${JTRUNNER} ${SRC}', + jtest_source = bld.path.ant_glob('test/*.xml'), + ) + + +At command line the CLASSPATH where to find the testing environment and the +test runner (default TestNG) that will then be seen in the environment as +CLASSPATH_JAVATEST (then used for use) and JTRUNNER and can be used for +dependencies and ut_str generation. + +Example configure for TestNG: + waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar --jtrunner=org.testng.TestNG + or as default runner is TestNG: + waf configure --jtpath=/tmp/testng-6.12.jar:/tmp/jcommander-1.71.jar + +Example configure for JUnit: + waf configure --jtpath=/tmp/junit.jar --jtrunner=org.junit.runner.JUnitCore + +The runner class presence on the system is checked for at configuration stage. + +""" + +import os +from waflib import Task, TaskGen, Options, Errors, Utils, Logs +from waflib.Tools import ccroot + +JAR_RE = '**/*' + +def _process_use_rec(self, name): + """ + Recursively process ``use`` for task generator with name ``name``.. + Used by javatest_process_use. + """ + if name in self.javatest_use_not or name in self.javatest_use_seen: + return + try: + tg = self.bld.get_tgen_by_name(name) + except Errors.WafError: + self.javatest_use_not.add(name) + return + + self.javatest_use_seen.append(name) + tg.post() + + for n in self.to_list(getattr(tg, 'use', [])): + _process_use_rec(self, n) + +@TaskGen.feature('javatest') +@TaskGen.after_method('process_source', 'apply_link', 'use_javac_files') +def javatest_process_use(self): + """ + Process the ``use`` attribute which contains a list of task generator names and store + paths that later is used to populate the unit test runtime environment. + """ + self.javatest_use_not = set() + self.javatest_use_seen = [] + self.javatest_libpaths = [] # strings or Nodes + self.javatest_pypaths = [] # strings or Nodes + self.javatest_dep_nodes = [] + + names = self.to_list(getattr(self, 'use', [])) + for name in names: + _process_use_rec(self, name) + + def extend_unique(lst, varlst): + ext = [] + for x in varlst: + if x not in lst: + ext.append(x) + lst.extend(ext) + + # Collect type specific info needed to construct a valid runtime environment + # for the test. + for name in self.javatest_use_seen: + tg = self.bld.get_tgen_by_name(name) + + # Python-Java embedding crosstools such as JEP + if 'py' in tg.features: + # Python dependencies are added to PYTHONPATH + pypath = getattr(tg, 'install_from', tg.path) + + if 'buildcopy' in tg.features: + # Since buildcopy is used we assume that PYTHONPATH in build should be used, + # not source + extend_unique(self.javatest_pypaths, [pypath.get_bld().abspath()]) + + # Add buildcopy output nodes to dependencies + extend_unique(self.javatest_dep_nodes, [o for task in getattr(tg, 'tasks', []) for o in getattr(task, 'outputs', [])]) + else: + # If buildcopy is not used, depend on sources instead + extend_unique(self.javatest_dep_nodes, tg.source) + extend_unique(self.javatest_pypaths, [pypath.abspath()]) + + + if getattr(tg, 'link_task', None): + # For tasks with a link_task (C, C++, D et.c.) 
include their library paths: + if not isinstance(tg.link_task, ccroot.stlink_task): + extend_unique(self.javatest_dep_nodes, tg.link_task.outputs) + extend_unique(self.javatest_libpaths, tg.link_task.env.LIBPATH) + + if 'pyext' in tg.features: + # If the taskgen is extending Python we also want to add the interpreter libpath. + extend_unique(self.javatest_libpaths, tg.link_task.env.LIBPATH_PYEXT) + else: + # Only add to libpath if the link task is not a Python extension + extend_unique(self.javatest_libpaths, [tg.link_task.outputs[0].parent.abspath()]) + + if 'javac' in tg.features or 'jar' in tg.features: + if hasattr(tg, 'jar_task'): + # For Java JAR tasks depend on generated JAR + extend_unique(self.javatest_dep_nodes, tg.jar_task.outputs) + else: + # For Java non-JAR ones we need to glob generated files (Java output files are not predictable) + if hasattr(tg, 'outdir'): + base_node = tg.outdir + else: + base_node = tg.path.get_bld() + + self.javatest_dep_nodes.extend([dx for dx in base_node.ant_glob(JAR_RE, remove=False, quiet=True)]) + + + +@TaskGen.feature('javatest') +@TaskGen.after_method('apply_java', 'use_javac_files', 'set_classpath', 'javatest_process_use') +def make_javatest(self): + """ + Creates a ``utest`` task with a populated environment for Java Unit test execution + + """ + tsk = self.create_task('utest') + tsk.set_run_after(self.javac_task) + + # Dependencies from recursive use analysis + tsk.dep_nodes.extend(self.javatest_dep_nodes) + + # Put test input files as waf_unit_test relies on that for some prints and log generation + # If jtest_source is there, this is specially useful for passing XML for TestNG + # that contain test specification, use that as inputs, otherwise test sources + if getattr(self, 'jtest_source', None): + tsk.inputs = self.to_nodes(self.jtest_source) + else: + if self.javac_task.srcdir[0].exists(): + tsk.inputs = self.javac_task.srcdir[0].ant_glob('**/*.java', remove=False) + + if getattr(self, 'ut_str', None): + self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) + tsk.vars = lst + tsk.vars + + if getattr(self, 'ut_cwd', None): + if isinstance(self.ut_cwd, str): + # we want a Node instance + if os.path.isabs(self.ut_cwd): + self.ut_cwd = self.bld.root.make_node(self.ut_cwd) + else: + self.ut_cwd = self.path.make_node(self.ut_cwd) + else: + self.ut_cwd = self.bld.bldnode + + # Get parent CLASSPATH and add output dir of test, we run from wscript dir + # We have to change it from list to the standard java -cp format (: separated) + tsk.env.CLASSPATH = ':'.join(self.env.CLASSPATH) + ':' + self.outdir.abspath() + + if not self.ut_cwd.exists(): + self.ut_cwd.mkdir() + + if not hasattr(self, 'ut_env'): + self.ut_env = dict(os.environ) + def add_paths(var, lst): + # Add list of paths to a variable, lst can contain strings or nodes + lst = [ str(n) for n in lst ] + Logs.debug("ut: %s: Adding paths %s=%s", self, var, lst) + self.ut_env[var] = os.pathsep.join(lst) + os.pathsep + self.ut_env.get(var, '') + + add_paths('PYTHONPATH', self.javatest_pypaths) + + if Utils.is_win32: + add_paths('PATH', self.javatest_libpaths) + elif Utils.unversioned_sys_platform() == 'darwin': + add_paths('DYLD_LIBRARY_PATH', self.javatest_libpaths) + add_paths('LD_LIBRARY_PATH', self.javatest_libpaths) + else: + add_paths('LD_LIBRARY_PATH', self.javatest_libpaths) + +def configure(ctx): + cp = ctx.env.CLASSPATH or '.' 
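+ # --jtpath entries below extend both CLASSPATH_JAVATEST (for use=) and + # the probe classpath cp, so the runner class can be located when + # check_java_class verifies it at the end of configure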
+ if getattr(Options.options, 'jtpath', None): + ctx.env.CLASSPATH_JAVATEST = getattr(Options.options, 'jtpath').split(':') + cp += ':' + getattr(Options.options, 'jtpath') + + if getattr(Options.options, 'jtrunner', None): + ctx.env.JTRUNNER = getattr(Options.options, 'jtrunner') + + if ctx.check_java_class(ctx.env.JTRUNNER, with_classpath=cp): + ctx.fatal('Could not run test class %r' % ctx.env.JTRUNNER) + +def options(opt): + opt.add_option('--jtpath', action='store', default='', dest='jtpath', + help='Path to jar(s) needed for javatest execution, colon separated, if not in the system CLASSPATH') + opt.add_option('--jtrunner', action='store', default='org.testng.TestNG', dest='jtrunner', + help='Class to run javatest test [default: org.testng.TestNG]') + diff --git a/third_party/waf/waflib/extras/kde4.py b/third_party/waf/waflib/extras/kde4.py new file mode 100644 index 0000000..aed9bfb --- /dev/null +++ b/third_party/waf/waflib/extras/kde4.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2010 (ita) + +""" +Support for the KDE4 libraries and msgfmt +""" + +import os, re +from waflib import Task, Utils +from waflib.TaskGen import feature + +@feature('msgfmt') +def apply_msgfmt(self): + """ + Process all languages to create .mo files and to install them:: + + def build(bld): + bld(features='msgfmt', langs='es de fr', appname='myapp', install_path='${KDE4_LOCALE_INSTALL_DIR}') + """ + for lang in self.to_list(self.langs): + node = self.path.find_resource(lang+'.po') + task = self.create_task('msgfmt', node, node.change_ext('.mo')) + + langname = lang.split('/') + langname = langname[-1] + + inst = getattr(self, 'install_path', '${KDE4_LOCALE_INSTALL_DIR}') + + self.add_install_as( + inst_to = inst + os.sep + langname + os.sep + 'LC_MESSAGES' + os.sep + getattr(self, 'appname', 'set_your_appname') + '.mo', + inst_from = task.outputs[0], + chmod = getattr(self, 'chmod', Utils.O644)) + +class msgfmt(Task.Task): + """ + Transform .po files into .mo files + """ + color = 'BLUE' + run_str = '${MSGFMT} ${SRC} -o ${TGT}' + +def configure(self): + """ + Detect kde4-config and set various variables for the *use* system:: + + def options(opt): + opt.load('compiler_cxx kde4') + def configure(conf): + conf.load('compiler_cxx kde4') + def build(bld): + bld.program(source='main.c', target='app', use='KDECORE KIO KHTML') + """ + kdeconfig = self.find_program('kde4-config') + prefix = self.cmd_and_log(kdeconfig + ['--prefix']).strip() + fname = '%s/share/apps/cmake/modules/KDELibsDependencies.cmake' % prefix + try: + os.stat(fname) + except OSError: + fname = '%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake' % prefix + try: + os.stat(fname) + except OSError: + self.fatal('could not open %s' % fname) + + try: + txt = Utils.readf(fname) + except EnvironmentError: + self.fatal('could not read %s' % fname) + + txt = txt.replace('\\\n', '\n') + fu = re.compile('#(.*)\n') + txt = fu.sub('', txt) + + setregexp = re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+\"([^"]+)\"\)') + found = setregexp.findall(txt) + + for (_, key, val) in found: + #print key, val + self.env[key] = val + + # well well, i could just write an interpreter for cmake files + self.env['LIB_KDECORE']= ['kdecore'] + self.env['LIB_KDEUI'] = ['kdeui'] + self.env['LIB_KIO'] = ['kio'] + self.env['LIB_KHTML'] = ['khtml'] + self.env['LIB_KPARTS'] = ['kparts'] + + self.env['LIBPATH_KDECORE'] = [os.path.join(self.env.KDE4_LIB_INSTALL_DIR, 'kde4', 'devel'), self.env.KDE4_LIB_INSTALL_DIR] + 
self.env['INCLUDES_KDECORE'] = [self.env['KDE4_INCLUDE_INSTALL_DIR']] + self.env.append_value('INCLUDES_KDECORE', [self.env['KDE4_INCLUDE_INSTALL_DIR']+ os.sep + 'KDE']) + + self.find_program('msgfmt', var='MSGFMT') + diff --git a/third_party/waf/waflib/extras/local_rpath.py b/third_party/waf/waflib/extras/local_rpath.py new file mode 100644 index 0000000..e3923d9 --- /dev/null +++ b/third_party/waf/waflib/extras/local_rpath.py @@ -0,0 +1,21 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011 (ita) + +import copy +from waflib.TaskGen import after_method, feature + +@after_method('propagate_uselib_vars') +@feature('cprogram', 'cshlib', 'cxxprogram', 'cxxshlib', 'fcprogram', 'fcshlib') +def add_rpath_stuff(self): + all = copy.copy(self.to_list(getattr(self, 'use', []))) + while all: + name = all.pop() + try: + tg = self.bld.get_tgen_by_name(name) + except: + continue + if hasattr(tg, 'link_task'): + self.env.append_value('RPATH', tg.link_task.outputs[0].parent.abspath()) + all.extend(self.to_list(getattr(tg, 'use', []))) + diff --git a/third_party/waf/waflib/extras/make.py b/third_party/waf/waflib/extras/make.py new file mode 100644 index 0000000..933d9ca --- /dev/null +++ b/third_party/waf/waflib/extras/make.py @@ -0,0 +1,142 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011 (ita) + +""" +A make-like way of executing the build, following the relationships between inputs/outputs + +This algorithm will lead to slower builds, will not be as flexible as "waf build", but +it might be useful for building data files (?) + +It is likely to break in the following cases: +- files are created dynamically (no inputs or outputs) +- headers +- building two files from different groups +""" + +import re +from waflib import Options, Task +from waflib.Build import BuildContext + +class MakeContext(BuildContext): + '''executes tasks in a step-by-step manner, following dependencies between inputs/outputs''' + cmd = 'make' + fun = 'build' + + def __init__(self, **kw): + super(MakeContext, self).__init__(**kw) + self.files = Options.options.files + + def get_build_iterator(self): + if not self.files: + while 1: + yield super(MakeContext, self).get_build_iterator() + + for g in self.groups: + for tg in g: + try: + f = tg.post + except AttributeError: + pass + else: + f() + + provides = {} + uses = {} + all_tasks = [] + tasks = [] + for pat in self.files.split(','): + matcher = self.get_matcher(pat) + for tg in g: + if isinstance(tg, Task.Task): + lst = [tg] + else: + lst = tg.tasks + for tsk in lst: + all_tasks.append(tsk) + + do_exec = False + for node in tsk.inputs: + try: + uses[node].append(tsk) + except: + uses[node] = [tsk] + + if matcher(node, output=False): + do_exec = True + break + + for node in tsk.outputs: + try: + provides[node].append(tsk) + except: + provides[node] = [tsk] + + if matcher(node, output=True): + do_exec = True + break + if do_exec: + tasks.append(tsk) + + # so we have the tasks that we need to process, the list of all tasks, + # the map of the tasks providing nodes, and the map of tasks using nodes + + if not tasks: + # if there are no tasks matching, return everything in the current group + result = all_tasks + else: + # this is like a big filter... 
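+ # worklist: start from the tasks that matched, then repeatedly pull in + # the tasks providing their input nodes until the closure stops growing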
+ result = set() + seen = set() + cur = set(tasks) + while cur: + result |= cur + tosee = set() + for tsk in cur: + for node in tsk.inputs: + if node in seen: + continue + seen.add(node) + tosee |= set(provides.get(node, [])) + cur = tosee + result = list(result) + + Task.set_file_constraints(result) + Task.set_precedence_constraints(result) + yield result + + while 1: + yield [] + + def get_matcher(self, pat): + # this returns a function + inn = True + out = True + if pat.startswith('in:'): + out = False + pat = pat.replace('in:', '') + elif pat.startswith('out:'): + inn = False + pat = pat.replace('out:', '') + + anode = self.root.find_node(pat) + pattern = None + if not anode: + if not pat.startswith('^'): + pat = '^.+?%s' % pat + if not pat.endswith('$'): + pat = '%s$' % pat + pattern = re.compile(pat) + + def match(node, output): + if output and not out: + return False + if not output and not inn: + return False + + if anode: + return anode == node + else: + return pattern.match(node.abspath()) + return match + diff --git a/third_party/waf/waflib/extras/midl.py b/third_party/waf/waflib/extras/midl.py new file mode 100644 index 0000000..43e6cf9 --- /dev/null +++ b/third_party/waf/waflib/extras/midl.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python +# Issue 1185 ultrix gmail com + +""" +Microsoft Interface Definition Language support. Given ComObject.idl, this tool +will generate ComObject.tlb ComObject_i.h ComObject_i.c ComObject_p.c and dlldata.c + +To declare targets using midl:: + + def configure(conf): + conf.load('msvc') + conf.load('midl') + + def build(bld): + bld( + features='c cshlib', + # Note: ComObject_i.c is generated from ComObject.idl + source = 'main.c ComObject.idl ComObject_i.c', + target = 'ComObject.dll') +""" + +from waflib import Task, Utils +from waflib.TaskGen import feature, before_method +import os + +def configure(conf): + conf.find_program(['midl'], var='MIDL') + + conf.env.MIDLFLAGS = [ + '/nologo', + '/D', + '_DEBUG', + '/W1', + '/char', + 'signed', + '/Oicf', + ] + +@feature('c', 'cxx') +@before_method('process_source') +def idl_file(self): + # Do this before process_source so that the generated header can be resolved + # when scanning source dependencies. + idl_nodes = [] + src_nodes = [] + for node in Utils.to_list(self.source): + if str(node).endswith('.idl'): + idl_nodes.append(node) + else: + src_nodes.append(node) + + for node in self.to_nodes(idl_nodes): + t = node.change_ext('.tlb') + h = node.change_ext('_i.h') + c = node.change_ext('_i.c') + p = node.change_ext('_p.c') + d = node.parent.find_or_declare('dlldata.c') + self.create_task('midl', node, [t, h, c, p, d]) + + self.source = src_nodes + +class midl(Task.Task): + """ + Compile idl files + """ + color = 'YELLOW' + run_str = '${MIDL} ${MIDLFLAGS} ${CPPPATH_ST:INCLUDES} /tlb ${TGT[0].bldpath()} /header ${TGT[1].bldpath()} /iid ${TGT[2].bldpath()} /proxy ${TGT[3].bldpath()} /dlldata ${TGT[4].bldpath()} ${SRC}' + before = ['winrc'] + diff --git a/third_party/waf/waflib/extras/msvc_pdb.py b/third_party/waf/waflib/extras/msvc_pdb.py new file mode 100644 index 0000000..077656b --- /dev/null +++ b/third_party/waf/waflib/extras/msvc_pdb.py @@ -0,0 +1,46 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Rafaël Kooi 2019 + +from waflib import TaskGen + +@TaskGen.feature('c', 'cxx', 'fc') +@TaskGen.after_method('propagate_uselib_vars') +def add_pdb_per_object(self): + """For msvc/fortran, specify a unique compile pdb per object, to work + around LNK4099. 
Flags are updated with a unique /Fd flag based on the + task output name. This is separate from the link pdb. + """ + if not hasattr(self, 'compiled_tasks'): + return + + link_task = getattr(self, 'link_task', None) + + for task in self.compiled_tasks: + if task.inputs and task.inputs[0].name.lower().endswith('.rc'): + continue + + add_pdb = False + for flagname in ('CFLAGS', 'CXXFLAGS', 'FCFLAGS'): + # several languages may be used at once + for flag in task.env[flagname]: + if flag[1:].lower() == 'zi': + add_pdb = True + break + + if add_pdb: + node = task.outputs[0].change_ext('.pdb') + pdb_flag = '/Fd:' + node.abspath() + + for flagname in ('CFLAGS', 'CXXFLAGS', 'FCFLAGS'): + buf = [pdb_flag] + for flag in task.env[flagname]: + if flag[1:3] == 'Fd' or flag[1:].lower() == 'fs' or flag[1:].lower() == 'mp': + continue + buf.append(flag) + task.env[flagname] = buf + + if link_task and not node in link_task.dep_nodes: + link_task.dep_nodes.append(node) + if not node in task.outputs: + task.outputs.append(node) diff --git a/third_party/waf/waflib/extras/msvcdeps.py b/third_party/waf/waflib/extras/msvcdeps.py new file mode 100644 index 0000000..e8985bd --- /dev/null +++ b/third_party/waf/waflib/extras/msvcdeps.py @@ -0,0 +1,294 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Copyright Garmin International or its subsidiaries, 2012-2013 + +''' +Off-load dependency scanning from Python code to MSVC compiler + +This tool is safe to load in any environment; it will only activate the +MSVC exploits when it finds that a particular taskgen uses MSVC to +compile. + +Empirical testing shows about a 10% execution time savings from using +this tool as compared to c_preproc. + +The technique of gutting scan() and pushing the dependency calculation +down to post_run() is cribbed from gccdeps.py. + +This affects the cxx class, so make sure to load Qt5 after this tool. + +Usage:: + + def options(opt): + opt.load('compiler_cxx') + def configure(conf): + conf.load('compiler_cxx msvcdeps') +''' + +import os, sys, tempfile, threading + +from waflib import Context, Errors, Logs, Task, Utils +from waflib.Tools import c_preproc, c, cxx, msvc +from waflib.TaskGen import feature, before_method + +lock = threading.Lock() + +PREPROCESSOR_FLAG = '/showIncludes' +INCLUDE_PATTERN = 'Note: including file:' + +# Extensible by outside tools +supported_compilers = ['msvc'] + +@feature('c', 'cxx') +@before_method('process_source') +def apply_msvcdeps_flags(taskgen): + if taskgen.env.CC_NAME not in supported_compilers: + return + + for flag in ('CFLAGS', 'CXXFLAGS'): + if taskgen.env.get_flat(flag).find(PREPROCESSOR_FLAG) < 0: + taskgen.env.append_value(flag, PREPROCESSOR_FLAG) + + +def get_correct_path_case(base_path, path): + ''' + Return a case-corrected version of ``path`` by searching the filesystem for + ``path``, relative to ``base_path``, using the case returned by the filesystem. 
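+ For example, a (hypothetical) call such as
+ get_correct_path_case('C:\\', 'c:\\windows\\system32') may return
+ 'C:\\Windows\\System32', i.e. the spelling reported by the filesystem.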
+ ''' + components = Utils.split_path(path) + + corrected_path = '' + if os.path.isabs(path): + corrected_path = components.pop(0).upper() + os.sep + + for part in components: + part = part.lower() + search_path = os.path.join(base_path, corrected_path) + if part == '..': + corrected_path = os.path.join(corrected_path, part) + search_path = os.path.normpath(search_path) + continue + + for item in sorted(os.listdir(search_path)): + if item.lower() == part: + corrected_path = os.path.join(corrected_path, item) + break + else: + raise ValueError("Can't find %r in %r" % (part, search_path)) + + return corrected_path + + +def path_to_node(base_node, path, cached_nodes): + ''' + Take the base node and the path and return a node + Results are cached because searching the node tree is expensive + The following code is executed by threads, it is not safe, so a lock is needed... + ''' + # normalize the path to remove parent path components (..) + path = os.path.normpath(path) + + # normalize the path case to increase likelihood of a cache hit + node_lookup_key = (base_node, os.path.normcase(path)) + + try: + node = cached_nodes[node_lookup_key] + except KeyError: + # retry with lock on cache miss + with lock: + try: + node = cached_nodes[node_lookup_key] + except KeyError: + path = get_correct_path_case(base_node.abspath(), path) + node = cached_nodes[node_lookup_key] = base_node.find_node(path) + + return node + +def post_run(self): + if self.env.CC_NAME not in supported_compilers: + return super(self.derived_msvcdeps, self).post_run() + + # TODO this is unlikely to work with netcache + if getattr(self, 'cached', None): + return Task.Task.post_run(self) + + resolved_nodes = [] + unresolved_names = [] + bld = self.generator.bld + + # Dynamically bind to the cache + try: + cached_nodes = bld.cached_nodes + except AttributeError: + cached_nodes = bld.cached_nodes = {} + + for path in self.msvcdeps_paths: + node = None + if os.path.isabs(path): + node = path_to_node(bld.root, path, cached_nodes) + else: + # when calling find_resource, make sure the path does not begin with '..' 
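+ # e.g. a (hypothetical) dependency reported as '..\\..\\src\\foo.h' is handled
+ # by popping each leading '..' and moving base_node one directory up instead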
+ base_node = bld.bldnode + path = [k for k in Utils.split_path(path) if k and k != '.'] + while path[0] == '..': + path.pop(0) + base_node = base_node.parent + path = os.sep.join(path) + + node = path_to_node(base_node, path, cached_nodes) + + if not node: + raise ValueError('could not find %r for %r' % (path, self)) + else: + if not c_preproc.go_absolute: + if not (node.is_child_of(bld.srcnode) or node.is_child_of(bld.bldnode)): + # System library + Logs.debug('msvcdeps: Ignoring system include %r', node) + continue + + if id(node) == id(self.inputs[0]): + # ignore the source file, it is already in the dependencies + # this way, successful config tests may be retrieved from the cache + continue + + resolved_nodes.append(node) + + Logs.debug('deps: msvcdeps for %s returned %s', self, resolved_nodes) + + bld.node_deps[self.uid()] = resolved_nodes + bld.raw_deps[self.uid()] = unresolved_names + + try: + del self.cache_sig + except AttributeError: + pass + + Task.Task.post_run(self) + +def scan(self): + if self.env.CC_NAME not in supported_compilers: + return super(self.derived_msvcdeps, self).scan() + + resolved_nodes = self.generator.bld.node_deps.get(self.uid(), []) + unresolved_names = [] + return (resolved_nodes, unresolved_names) + +def sig_implicit_deps(self): + if self.env.CC_NAME not in supported_compilers: + return super(self.derived_msvcdeps, self).sig_implicit_deps() + bld = self.generator.bld + + try: + return self.compute_sig_implicit_deps() + except Errors.TaskNotReady: + raise ValueError("Please specify the build order precisely with msvcdeps (c/c++ tasks)") + except EnvironmentError: + # If a file is renamed, assume the dependencies are stale and must be recalculated + for x in bld.node_deps.get(self.uid(), []): + if not x.is_bld() and not x.exists(): + try: + del x.parent.children[x.name] + except KeyError: + pass + + key = self.uid() + bld.node_deps[key] = [] + bld.raw_deps[key] = [] + return Utils.SIG_NIL + +def exec_command(self, cmd, **kw): + if self.env.CC_NAME not in supported_compilers: + return super(self.derived_msvcdeps, self).exec_command(cmd, **kw) + + if not 'cwd' in kw: + kw['cwd'] = self.get_cwd() + + if self.env.PATH: + env = kw['env'] = dict(kw.get('env') or self.env.env or os.environ) + env['PATH'] = self.env.PATH if isinstance(self.env.PATH, str) else os.pathsep.join(self.env.PATH) + + # The Visual Studio IDE adds an environment variable that causes + # the MS compiler to send its textual output directly to the + # debugging window rather than normal stdout/stderr. + # + # This is unrecoverably bad for this tool because it will cause + # all the dependency scanning to see an empty stdout stream and + # assume that the file being compiled uses no headers. + # + # See http://blogs.msdn.com/b/freik/archive/2006/04/05/569025.aspx + # + # Attempting to repair the situation by deleting the offending + # envvar at this point in tool execution will not be good enough-- + # its presence poisons the 'waf configure' step earlier. We just + # want to put a sanity check here in order to help developers + # quickly diagnose the issue if an otherwise-good Waf tree + # is then executed inside the MSVS IDE. 
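+ # failing fast here beats silently recording an empty dependency list for
+ # every object file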
+ assert 'VS_UNICODE_OUTPUT' not in kw['env'] + + cmd, args = self.split_argfile(cmd) + try: + (fd, tmp) = tempfile.mkstemp() + os.write(fd, '\r\n'.join(args).encode()) + os.close(fd) + + self.msvcdeps_paths = [] + kw['env'] = kw.get('env', os.environ.copy()) + kw['cwd'] = kw.get('cwd', os.getcwd()) + kw['quiet'] = Context.STDOUT + kw['output'] = Context.STDOUT + + out = [] + if Logs.verbose: + Logs.debug('argfile: @%r -> %r', tmp, args) + try: + raw_out = self.generator.bld.cmd_and_log(cmd + ['@' + tmp], **kw) + ret = 0 + except Errors.WafError as e: + # Use e.msg if e.stdout is not set + raw_out = getattr(e, 'stdout', e.msg) + + # Return non-zero error code even if we didn't + # get one from the exception object + ret = getattr(e, 'returncode', 1) + + Logs.debug('msvcdeps: Running for: %s' % self.inputs[0]) + for line in raw_out.splitlines(): + if line.startswith(INCLUDE_PATTERN): + # Only strip whitespace after log to preserve + # dependency structure in debug output + inc_path = line[len(INCLUDE_PATTERN):] + Logs.debug('msvcdeps: Regex matched %s', inc_path) + self.msvcdeps_paths.append(inc_path.strip()) + else: + out.append(line) + + # Pipe through the remaining stdout content (not related to /showIncludes) + if self.generator.bld.logger: + self.generator.bld.logger.debug('out: %s' % os.linesep.join(out)) + else: + sys.stdout.write(os.linesep.join(out) + os.linesep) + + return ret + finally: + try: + os.remove(tmp) + except OSError: + # anti-virus and indexers can keep files open -_- + pass + + +def wrap_compiled_task(classname): + derived_class = type(classname, (Task.classes[classname],), {}) + derived_class.derived_msvcdeps = derived_class + derived_class.post_run = post_run + derived_class.scan = scan + derived_class.sig_implicit_deps = sig_implicit_deps + derived_class.exec_command = exec_command + +for k in ('c', 'cxx'): + if k in Task.classes: + wrap_compiled_task(k) + +def options(opt): + raise ValueError('Do not load msvcdeps options') + diff --git a/third_party/waf/waflib/extras/msvs.py b/third_party/waf/waflib/extras/msvs.py new file mode 100644 index 0000000..f987bb5 --- /dev/null +++ b/third_party/waf/waflib/extras/msvs.py @@ -0,0 +1,1052 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Avalanche Studios 2009-2011 +# Thomas Nagy 2011 + +""" +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +1. Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +3. The name of the author may not be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, +INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) +HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, +STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING +IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +""" + +""" +To add this tool to your project: +def options(conf): + opt.load('msvs') + +It can be a good idea to add the sync_exec tool too. + +To generate solution files: +$ waf configure msvs + +To customize the outputs, provide subclasses in your wscript files:: + + from waflib.extras import msvs + class vsnode_target(msvs.vsnode_target): + def get_build_command(self, props): + # likely to be required + return "waf.bat build" + def collect_source(self): + # likely to be required + ... + class msvs_bar(msvs.msvs_generator): + def init(self): + msvs.msvs_generator.init(self) + self.vsnode_target = vsnode_target + +The msvs class re-uses the same build() function for reading the targets (task generators), +you may therefore specify msvs settings on the context object:: + + def build(bld): + bld.solution_name = 'foo.sln' + bld.waf_command = 'waf.bat' + bld.projects_dir = bld.srcnode.make_node('.depproj') + bld.projects_dir.mkdir() + +For visual studio 2008, the command is called 'msvs2008', and the classes +such as vsnode_target are wrapped by a decorator class 'wrap_2008' to +provide special functionality. + +To customize platform toolsets, pass additional parameters, for example:: + + class msvs_2013(msvs.msvs_generator): + cmd = 'msvs2013' + numver = '13.00' + vsver = '2013' + platform_toolset_ver = 'v120' + +ASSUMPTIONS: +* a project can be either a directory or a target, vcxproj files are written only for targets that have source files +* each project is a vcxproj file, therefore the project uuid needs only to be a hash of the absolute path +""" + +import os, re, sys +import uuid # requires python 2.5 +from waflib.Build import BuildContext +from waflib import Utils, TaskGen, Logs, Task, Context, Node, Options + +HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' + +PROJECT_TEMPLATE = r'''<?xml version="1.0" encoding="UTF-8"?> +<Project DefaultTargets="Build" ToolsVersion="4.0" + xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + + <ItemGroup Label="ProjectConfigurations"> + ${for b in project.build_properties} + <ProjectConfiguration Include="${b.configuration}|${b.platform}"> + <Configuration>${b.configuration}</Configuration> + <Platform>${b.platform}</Platform> + </ProjectConfiguration> + ${endfor} + </ItemGroup> + + <PropertyGroup Label="Globals"> + <ProjectGuid>{${project.uuid}}</ProjectGuid> + <Keyword>MakeFileProj</Keyword> + <ProjectName>${project.name}</ProjectName> + </PropertyGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" /> + + ${for b in project.build_properties} + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'" Label="Configuration"> + <ConfigurationType>Makefile</ConfigurationType> + <OutDir>${b.outdir}</OutDir> + <PlatformToolset>${project.platform_toolset_ver}</PlatformToolset> + </PropertyGroup> + ${endfor} + + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <ImportGroup Label="ExtensionSettings"> + </ImportGroup> + + ${for b in project.build_properties} + <ImportGroup Label="PropertySheets" 
Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'"> + <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" /> + </ImportGroup> + ${endfor} + + ${for b in project.build_properties} + <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'"> + <NMakeBuildCommandLine>${xml:project.get_build_command(b)}</NMakeBuildCommandLine> + <NMakeReBuildCommandLine>${xml:project.get_rebuild_command(b)}</NMakeReBuildCommandLine> + <NMakeCleanCommandLine>${xml:project.get_clean_command(b)}</NMakeCleanCommandLine> + <NMakeIncludeSearchPath>${xml:b.includes_search_path}</NMakeIncludeSearchPath> + <NMakePreprocessorDefinitions>${xml:b.preprocessor_definitions};$(NMakePreprocessorDefinitions)</NMakePreprocessorDefinitions> + <IncludePath>${xml:b.includes_search_path}</IncludePath> + <ExecutablePath>$(ExecutablePath)</ExecutablePath> + + ${if getattr(b, 'output_file', None)} + <NMakeOutput>${xml:b.output_file}</NMakeOutput> + ${endif} + ${if getattr(b, 'deploy_dir', None)} + <RemoteRoot>${xml:b.deploy_dir}</RemoteRoot> + ${endif} + </PropertyGroup> + ${endfor} + + ${for b in project.build_properties} + ${if getattr(b, 'deploy_dir', None)} + <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='${b.configuration}|${b.platform}'"> + <Deploy> + <DeploymentType>CopyToHardDrive</DeploymentType> + </Deploy> + </ItemDefinitionGroup> + ${endif} + ${endfor} + + <ItemGroup> + ${for x in project.source} + <${project.get_key(x)} Include='${x.win32path()}' /> + ${endfor} + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <ImportGroup Label="ExtensionTargets"> + </ImportGroup> +</Project> +''' + +FILTER_TEMPLATE = '''<?xml version="1.0" encoding="UTF-8"?> +<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <ItemGroup> + ${for x in project.source} + <${project.get_key(x)} Include="${x.win32path()}"> + <Filter>${project.get_filter_name(x.parent)}</Filter> + </${project.get_key(x)}> + ${endfor} + </ItemGroup> + <ItemGroup> + ${for x in project.dirs()} + <Filter Include="${project.get_filter_name(x)}"> + <UniqueIdentifier>{${project.make_uuid(x.win32path())}}</UniqueIdentifier> + </Filter> + ${endfor} + </ItemGroup> +</Project> +''' + +PROJECT_2008_TEMPLATE = r'''<?xml version="1.0" encoding="UTF-8"?> +<VisualStudioProject ProjectType="Visual C++" Version="9,00" + Name="${xml: project.name}" ProjectGUID="{${project.uuid}}" + Keyword="MakeFileProj" + TargetFrameworkVersion="196613"> + <Platforms> + ${if project.build_properties} + ${for b in project.build_properties} + <Platform Name="${xml: b.platform}" /> + ${endfor} + ${else} + <Platform Name="Win32" /> + ${endif} + </Platforms> + <ToolFiles> + </ToolFiles> + <Configurations> + ${if project.build_properties} + ${for b in project.build_properties} + <Configuration + Name="${xml: b.configuration}|${xml: b.platform}" + IntermediateDirectory="$ConfigurationName" + OutputDirectory="${xml: b.outdir}" + ConfigurationType="0"> + <Tool + Name="VCNMakeTool" + BuildCommandLine="${xml: project.get_build_command(b)}" + ReBuildCommandLine="${xml: project.get_rebuild_command(b)}" + CleanCommandLine="${xml: project.get_clean_command(b)}" + ${if getattr(b, 'output_file', None)} + Output="${xml: b.output_file}" + ${endif} + PreprocessorDefinitions="${xml: b.preprocessor_definitions}" + IncludeSearchPath="${xml: b.includes_search_path}" 
+ ForcedIncludes="" + ForcedUsingAssemblies="" + AssemblySearchPath="" + CompileAsManaged="" + /> + </Configuration> + ${endfor} + ${else} + <Configuration Name="Release|Win32" > + </Configuration> + ${endif} + </Configurations> + <References> + </References> + <Files> +${project.display_filter()} + </Files> +</VisualStudioProject> +''' + +SOLUTION_TEMPLATE = '''Microsoft Visual Studio Solution File, Format Version ${project.numver} +# Visual Studio ${project.vsver} +${for p in project.all_projects} +Project("{${p.ptype()}}") = "${p.name}", "${p.title}", "{${p.uuid}}" +EndProject${endfor} +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + ${if project.all_projects} + ${for (configuration, platform) in project.all_projects[0].ctx.project_configurations()} + ${configuration}|${platform} = ${configuration}|${platform} + ${endfor} + ${endif} + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + ${for p in project.all_projects} + ${if hasattr(p, 'source')} + ${for b in p.build_properties} + {${p.uuid}}.${b.configuration}|${b.platform}.ActiveCfg = ${b.configuration}|${b.platform} + ${if getattr(p, 'is_active', None)} + {${p.uuid}}.${b.configuration}|${b.platform}.Build.0 = ${b.configuration}|${b.platform} + ${endif} + ${if getattr(p, 'is_deploy', None)} + {${p.uuid}}.${b.configuration}|${b.platform}.Deploy.0 = ${b.configuration}|${b.platform} + ${endif} + ${endfor} + ${endif} + ${endfor} + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(NestedProjects) = preSolution + ${for p in project.all_projects} + ${if p.parent} + {${p.uuid}} = {${p.parent.uuid}} + ${endif} + ${endfor} + EndGlobalSection +EndGlobal +''' + +COMPILE_TEMPLATE = '''def f(project): + lst = [] + def xml_escape(value): + return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") + + %s + + #f = open('cmd.txt', 'w') + #f.write(str(lst)) + #f.close() + return ''.join(lst) +''' +reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<code>[^}]*?)\})", re.M) +def compile_template(line): + """ + Compile a template expression into a python function (like jsps, but way shorter) + """ + extr = [] + def repl(match): + g = match.group + if g('dollar'): + return "$" + elif g('backslash'): + return "\\" + elif g('subst'): + extr.append(g('code')) + return "<<|@|>>" + return None + + line2 = reg_act.sub(repl, line) + params = line2.split('<<|@|>>') + assert(extr) + + + indent = 0 + buf = [] + app = buf.append + + def app(txt): + buf.append(indent * '\t' + txt) + + for x in range(len(extr)): + if params[x]: + app("lst.append(%r)" % params[x]) + + f = extr[x] + if f.startswith(('if', 'for')): + app(f + ':') + indent += 1 + elif f.startswith('py:'): + app(f[3:]) + elif f.startswith(('endif', 'endfor')): + indent -= 1 + elif f.startswith(('else', 'elif')): + indent -= 1 + app(f + ':') + indent += 1 + elif f.startswith('xml:'): + app('lst.append(xml_escape(%s))' % f[4:]) + else: + #app('lst.append((%s) or "cannot find %s")' % (f, f)) + app('lst.append(%s)' % f) + + if extr: + if params[-1]: + app("lst.append(%r)" % params[-1]) + + fun = COMPILE_TEMPLATE % "\n\t".join(buf) + #print(fun) + return Task.funex(fun) + + +re_blank = re.compile('(\n|\r|\\s)*\n', re.M) +def rm_blank_lines(txt): + txt = re_blank.sub('\r\n', txt) + return txt + +BOM = '\xef\xbb\xbf' +try: + BOM = bytes(BOM, 'latin-1') # python 3 +except TypeError: + pass + +def stealth_write(self, 
data, flags='wb'): + try: + unicode + except NameError: + data = data.encode('utf-8') # python 3 + else: + data = data.decode(sys.getfilesystemencoding(), 'replace') + data = data.encode('utf-8') + + if self.name.endswith(('.vcproj', '.vcxproj')): + data = BOM + data + + try: + txt = self.read(flags='rb') + if txt != data: + raise ValueError('must write') + except (IOError, ValueError): + self.write(data, flags=flags) + else: + Logs.debug('msvs: skipping %s', self.win32path()) +Node.Node.stealth_write = stealth_write + +re_win32 = re.compile(r'^([/\\]cygdrive)?[/\\]([a-z])([^a-z0-9_-].*)', re.I) +def win32path(self): + p = self.abspath() + m = re_win32.match(p) + if m: + return "%s:%s" % (m.group(2).upper(), m.group(3)) + return p +Node.Node.win32path = win32path + +re_quote = re.compile("[^a-zA-Z0-9-]") +def quote(s): + return re_quote.sub("_", s) + +def xml_escape(value): + return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") + +def make_uuid(v, prefix = None): + """ + simple utility function + """ + if isinstance(v, dict): + keys = list(v.keys()) + keys.sort() + tmp = str([(k, v[k]) for k in keys]) + else: + tmp = str(v) + d = Utils.md5(tmp.encode()).hexdigest().upper() + if prefix: + d = '%s%s' % (prefix, d[8:]) + gid = uuid.UUID(d, version = 4) + return str(gid).upper() + +def diff(node, fromnode): + # difference between two nodes, but with "(..)" instead of ".." + c1 = node + c2 = fromnode + + c1h = c1.height() + c2h = c2.height() + + lst = [] + up = 0 + + while c1h > c2h: + lst.append(c1.name) + c1 = c1.parent + c1h -= 1 + + while c2h > c1h: + up += 1 + c2 = c2.parent + c2h -= 1 + + while id(c1) != id(c2): + lst.append(c1.name) + up += 1 + + c1 = c1.parent + c2 = c2.parent + + for i in range(up): + lst.append('(..)') + lst.reverse() + return tuple(lst) + +class build_property(object): + pass + +class vsnode(object): + """ + Abstract class representing visual studio elements + We assume that all visual studio nodes have a uuid and a parent + """ + def __init__(self, ctx): + self.ctx = ctx # msvs context + self.name = '' # string, mandatory + self.vspath = '' # path in visual studio (name for dirs, absolute path for projects) + self.uuid = '' # string, mandatory + self.parent = None # parent node for visual studio nesting + + def get_waf(self): + """ + Override in subclasses... + """ + return 'cd /d "%s" & %s' % (self.ctx.srcnode.win32path(), getattr(self.ctx, 'waf_command', 'waf.bat')) + + def ptype(self): + """ + Return a special uuid for projects written in the solution file + """ + pass + + def write(self): + """ + Write the project file, by default, do nothing + """ + pass + + def make_uuid(self, val): + """ + Alias for creating uuid values easily (the templates cannot access global variables) + """ + return make_uuid(val) + +class vsnode_vsdir(vsnode): + """ + Nodes representing visual studio folders (which do not match the filesystem tree!) 
+ """ + VS_GUID_SOLUTIONFOLDER = "2150E333-8FDC-42A3-9474-1A3956D46DE8" + def __init__(self, ctx, uuid, name, vspath=''): + vsnode.__init__(self, ctx) + self.title = self.name = name + self.uuid = uuid + self.vspath = vspath or name + + def ptype(self): + return self.VS_GUID_SOLUTIONFOLDER + +class vsnode_project(vsnode): + """ + Abstract class representing visual studio project elements + A project is assumed to be writable, and has a node representing the file to write to + """ + VS_GUID_VCPROJ = "8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942" + def ptype(self): + return self.VS_GUID_VCPROJ + + def __init__(self, ctx, node): + vsnode.__init__(self, ctx) + self.path = node + self.uuid = make_uuid(node.win32path()) + self.name = node.name + self.platform_toolset_ver = getattr(ctx, 'platform_toolset_ver', None) + self.title = self.path.win32path() + self.source = [] # list of node objects + self.build_properties = [] # list of properties (nmake commands, output dir, etc) + + def dirs(self): + """ + Get the list of parent folders of the source files (header files included) + for writing the filters + """ + lst = [] + def add(x): + if x.height() > self.tg.path.height() and x not in lst: + lst.append(x) + add(x.parent) + for x in self.source: + add(x.parent) + return lst + + def write(self): + Logs.debug('msvs: creating %r', self.path) + + # first write the project file + template1 = compile_template(PROJECT_TEMPLATE) + proj_str = template1(self) + proj_str = rm_blank_lines(proj_str) + self.path.stealth_write(proj_str) + + # then write the filter + template2 = compile_template(FILTER_TEMPLATE) + filter_str = template2(self) + filter_str = rm_blank_lines(filter_str) + tmp = self.path.parent.make_node(self.path.name + '.filters') + tmp.stealth_write(filter_str) + + def get_key(self, node): + """ + required for writing the source files + """ + name = node.name + if name.endswith(('.cpp', '.c')): + return 'ClCompile' + return 'ClInclude' + + def collect_properties(self): + """ + Returns a list of triplet (configuration, platform, output_directory) + """ + ret = [] + for c in self.ctx.configurations: + for p in self.ctx.platforms: + x = build_property() + x.outdir = '' + + x.configuration = c + x.platform = p + + x.preprocessor_definitions = '' + x.includes_search_path = '' + + # can specify "deploy_dir" too + ret.append(x) + self.build_properties = ret + + def get_build_params(self, props): + opt = '--execsolution="%s"' % self.ctx.get_solution_node().win32path() + return (self.get_waf(), opt) + + def get_build_command(self, props): + return "%s build %s" % self.get_build_params(props) + + def get_clean_command(self, props): + return "%s clean %s" % self.get_build_params(props) + + def get_rebuild_command(self, props): + return "%s clean build %s" % self.get_build_params(props) + + def get_filter_name(self, node): + lst = diff(node, self.tg.path) + return '\\'.join(lst) or '.' 
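+ # A concrete version of the subclassing pattern from the module docstring;
+ # the class names and the -j4 flag below are illustrative only:
+ #
+ #     from waflib.extras import msvs
+ #     class vsnode_target_custom(msvs.vsnode_target):
+ #         def get_build_command(self, props):
+ #             # reuse the default waf invocation, but pin the jobs count
+ #             return '%s build -j4 %s' % self.get_build_params(props)
+ #     class msvs_custom(msvs.msvs_generator):
+ #         cmd = 'msvs_custom'
+ #         def init(self):
+ #             self.vsnode_target = vsnode_target_custom
+ #             msvs.msvs_generator.init(self)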
+ +class vsnode_alias(vsnode_project): + def __init__(self, ctx, node, name): + vsnode_project.__init__(self, ctx, node) + self.name = name + self.output_file = '' + +class vsnode_build_all(vsnode_alias): + """ + Fake target used to emulate the behaviour of "make all" (starting one process by target is slow) + This is the only alias enabled by default + """ + def __init__(self, ctx, node, name='build_all_projects'): + vsnode_alias.__init__(self, ctx, node, name) + self.is_active = True + +class vsnode_install_all(vsnode_alias): + """ + Fake target used to emulate the behaviour of "make install" + """ + def __init__(self, ctx, node, name='install_all_projects'): + vsnode_alias.__init__(self, ctx, node, name) + + def get_build_command(self, props): + return "%s build install %s" % self.get_build_params(props) + + def get_clean_command(self, props): + return "%s clean %s" % self.get_build_params(props) + + def get_rebuild_command(self, props): + return "%s clean build install %s" % self.get_build_params(props) + +class vsnode_project_view(vsnode_alias): + """ + Fake target used to emulate a file system view + """ + def __init__(self, ctx, node, name='project_view'): + vsnode_alias.__init__(self, ctx, node, name) + self.tg = self.ctx() # fake one, cannot remove + self.exclude_files = Node.exclude_regs + ''' +waf-2* +waf3-2*/** +.waf-2* +.waf3-2*/** +**/*.sdf +**/*.suo +**/*.ncb +**/%s + ''' % Options.lockfile + + def collect_source(self): + # this is likely to be slow + self.source = self.ctx.srcnode.ant_glob('**', excl=self.exclude_files) + + def get_build_command(self, props): + params = self.get_build_params(props) + (self.ctx.cmd,) + return "%s %s %s" % params + + def get_clean_command(self, props): + return "" + + def get_rebuild_command(self, props): + return self.get_build_command(props) + +class vsnode_target(vsnode_project): + """ + Visual studio project representing a targets (programs, libraries, etc) and bound + to a task generator + """ + def __init__(self, ctx, tg): + """ + A project is more or less equivalent to a file/folder + """ + base = getattr(ctx, 'projects_dir', None) or tg.path + node = base.make_node(quote(tg.name) + ctx.project_extension) # the project file as a Node + vsnode_project.__init__(self, ctx, node) + self.name = quote(tg.name) + self.tg = tg # task generator + + def get_build_params(self, props): + """ + Override the default to add the target name + """ + opt = '--execsolution="%s"' % self.ctx.get_solution_node().win32path() + if getattr(self, 'tg', None): + opt += " --targets=%s" % self.tg.name + return (self.get_waf(), opt) + + def collect_source(self): + tg = self.tg + source_files = tg.to_nodes(getattr(tg, 'source', [])) + include_dirs = Utils.to_list(getattr(tg, 'msvs_includes', [])) + include_files = [] + for x in include_dirs: + if isinstance(x, str): + x = tg.path.find_node(x) + if x: + lst = [y for y in x.ant_glob(HEADERS_GLOB, flat=False)] + include_files.extend(lst) + + # remove duplicates + self.source.extend(list(set(source_files + include_files))) + self.source.sort(key=lambda x: x.win32path()) + + def collect_properties(self): + """ + Visual studio projects are associated with platforms and configurations (for building especially) + """ + super(vsnode_target, self).collect_properties() + for x in self.build_properties: + x.outdir = self.path.parent.win32path() + x.preprocessor_definitions = '' + x.includes_search_path = '' + + try: + tsk = self.tg.link_task + except AttributeError: + pass + else: + x.output_file = tsk.outputs[0].win32path() + 
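+ # the DEFINES/INCPATHS values computed by the C/C++ tools feed the NMake
+ # IntelliSense fields (NMakePreprocessorDefinitions, NMakeIncludeSearchPath)
+ # of the generated project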
x.preprocessor_definitions = ';'.join(tsk.env.DEFINES) + x.includes_search_path = ';'.join(self.tg.env.INCPATHS) + +class msvs_generator(BuildContext): + '''generates a visual studio 2010 solution''' + cmd = 'msvs' + fun = 'build' + numver = '11.00' # Visual Studio Version Number + vsver = '2010' # Visual Studio Version Year + platform_toolset_ver = 'v110' # Platform Toolset Version Number + + def init(self): + """ + Some data that needs to be present + """ + if not getattr(self, 'configurations', None): + self.configurations = ['Release'] # LocalRelease, RemoteDebug, etc + if not getattr(self, 'platforms', None): + self.platforms = ['Win32'] + if not getattr(self, 'all_projects', None): + self.all_projects = [] + if not getattr(self, 'project_extension', None): + self.project_extension = '.vcxproj' + if not getattr(self, 'projects_dir', None): + self.projects_dir = self.srcnode.make_node('.depproj') + self.projects_dir.mkdir() + + # bind the classes to the object, so that subclass can provide custom generators + if not getattr(self, 'vsnode_vsdir', None): + self.vsnode_vsdir = vsnode_vsdir + if not getattr(self, 'vsnode_target', None): + self.vsnode_target = vsnode_target + if not getattr(self, 'vsnode_build_all', None): + self.vsnode_build_all = vsnode_build_all + if not getattr(self, 'vsnode_install_all', None): + self.vsnode_install_all = vsnode_install_all + if not getattr(self, 'vsnode_project_view', None): + self.vsnode_project_view = vsnode_project_view + + self.numver = self.__class__.numver + self.vsver = self.__class__.vsver + self.platform_toolset_ver = self.__class__.platform_toolset_ver + + def execute(self): + """ + Entry point + """ + self.restore() + if not self.all_envs: + self.load_envs() + self.recurse([self.run_dir]) + + # user initialization + self.init() + + # two phases for creating the solution + self.collect_projects() # add project objects into "self.all_projects" + self.write_files() # write the corresponding project and solution files + + def collect_projects(self): + """ + Fill the list self.all_projects with project objects + Fill the list of build targets + """ + self.collect_targets() + self.add_aliases() + self.collect_dirs() + default_project = getattr(self, 'default_project', None) + def sortfun(x): + # folders should sort to the top + if getattr(x, 'VS_GUID_SOLUTIONFOLDER', None): + return '' + # followed by the default project + elif x.name == default_project: + return ' ' + return getattr(x, 'path', None) and x.path.win32path() or x.name + self.all_projects.sort(key=sortfun) + + def write_files(self): + """ + Write the project and solution files from the data collected + so far. 
It is unlikely that you will want to change this + """ + for p in self.all_projects: + p.write() + + # and finally write the solution file + node = self.get_solution_node() + node.parent.mkdir() + Logs.warn('Creating %r', node) + template1 = compile_template(SOLUTION_TEMPLATE) + sln_str = template1(self) + sln_str = rm_blank_lines(sln_str) + node.stealth_write(sln_str) + + def get_solution_node(self): + """ + The solution filename is required when writing the .vcproj files + return self.solution_node and if it does not exist, make one + """ + try: + return self.solution_node + except AttributeError: + pass + + solution_name = getattr(self, 'solution_name', None) + if not solution_name: + solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '.sln' + if os.path.isabs(solution_name): + self.solution_node = self.root.make_node(solution_name) + else: + self.solution_node = self.srcnode.make_node(solution_name) + return self.solution_node + + def project_configurations(self): + """ + Helper that returns all the pairs (config,platform) + """ + ret = [] + for c in self.configurations: + for p in self.platforms: + ret.append((c, p)) + return ret + + def collect_targets(self): + """ + Process the list of task generators + """ + for g in self.groups: + for tg in g: + if not isinstance(tg, TaskGen.task_gen): + continue + + if not hasattr(tg, 'msvs_includes'): + tg.msvs_includes = tg.to_list(getattr(tg, 'includes', [])) + tg.to_list(getattr(tg, 'export_includes', [])) + tg.post() + if not getattr(tg, 'link_task', None): + continue + + p = self.vsnode_target(self, tg) + p.collect_source() # delegate this processing + p.collect_properties() + self.all_projects.append(p) + + def add_aliases(self): + """ + Add a specific target that emulates the "make all" necessary for Visual studio when pressing F7 + We also add an alias for "make install" (disabled by default) + """ + base = getattr(self, 'projects_dir', None) or self.tg.path + + node_project = base.make_node('build_all_projects' + self.project_extension) # Node + p_build = self.vsnode_build_all(self, node_project) + p_build.collect_properties() + self.all_projects.append(p_build) + + node_project = base.make_node('install_all_projects' + self.project_extension) # Node + p_install = self.vsnode_install_all(self, node_project) + p_install.collect_properties() + self.all_projects.append(p_install) + + node_project = base.make_node('project_view' + self.project_extension) # Node + p_view = self.vsnode_project_view(self, node_project) + p_view.collect_source() + p_view.collect_properties() + self.all_projects.append(p_view) + + n = self.vsnode_vsdir(self, make_uuid(self.srcnode.win32path() + 'build_aliases'), "build_aliases") + p_build.parent = p_install.parent = p_view.parent = n + self.all_projects.append(n) + + def collect_dirs(self): + """ + Create the folder structure in the Visual studio project view + """ + seen = {} + def make_parents(proj): + # look at a project, try to make a parent + if getattr(proj, 'parent', None): + # aliases already have parents + return + x = proj.iter_path + if x in seen: + proj.parent = seen[x] + return + + # There is not vsnode_vsdir for x. 
+ # So create a project representing the folder "x" + n = proj.parent = seen[x] = self.vsnode_vsdir(self, make_uuid(x.win32path()), x.name) + n.iter_path = x.parent + self.all_projects.append(n) + + # recurse up to the project directory + if x.height() > self.srcnode.height() + 1: + make_parents(n) + + for p in self.all_projects[:]: # iterate over a copy of all projects + if not getattr(p, 'tg', None): + # but only projects that have a task generator + continue + + # make a folder for each task generator + p.iter_path = p.tg.path + make_parents(p) + +def wrap_2008(cls): + class dec(cls): + def __init__(self, *k, **kw): + cls.__init__(self, *k, **kw) + self.project_template = PROJECT_2008_TEMPLATE + + def display_filter(self): + + root = build_property() + root.subfilters = [] + root.sourcefiles = [] + root.source = [] + root.name = '' + + @Utils.run_once + def add_path(lst): + if not lst: + return root + child = build_property() + child.subfilters = [] + child.sourcefiles = [] + child.source = [] + child.name = lst[-1] + + par = add_path(lst[:-1]) + par.subfilters.append(child) + return child + + for x in self.source: + # this crap is for enabling subclasses to override get_filter_name + tmp = self.get_filter_name(x.parent) + tmp = tmp != '.' and tuple(tmp.split('\\')) or () + par = add_path(tmp) + par.source.append(x) + + def display(n): + buf = [] + for x in n.source: + buf.append('<File RelativePath="%s" FileType="%s"/>\n' % (xml_escape(x.win32path()), self.get_key(x))) + for x in n.subfilters: + buf.append('<Filter Name="%s">' % xml_escape(x.name)) + buf.append(display(x)) + buf.append('</Filter>') + return '\n'.join(buf) + + return display(root) + + def get_key(self, node): + """ + If you do not want to let visual studio use the default file extensions, + override this method to return a value: + 0: C/C++ Code, 1: C++ Class, 2: C++ Header File, 3: C++ Form, + 4: C++ Control, 5: Text File, 6: DEF File, 7: IDL File, + 8: Makefile, 9: RGS File, 10: RC File, 11: RES File, 12: XSD File, + 13: XML File, 14: HTML File, 15: CSS File, 16: Bitmap, 17: Icon, + 18: Resx File, 19: BSC File, 20: XSX File, 21: C++ Web Service, + 22: ASAX File, 23: Asp Page, 24: Document, 25: Discovery File, + 26: C# File, 27: eFileTypeClassDiagram, 28: MHTML Document, + 29: Property Sheet, 30: Cursor, 31: Manifest, 32: eFileTypeRDLC + """ + return '' + + def write(self): + Logs.debug('msvs: creating %r', self.path) + template1 = compile_template(self.project_template) + proj_str = template1(self) + proj_str = rm_blank_lines(proj_str) + self.path.stealth_write(proj_str) + + return dec + +class msvs_2008_generator(msvs_generator): + '''generates a visual studio 2008 solution''' + cmd = 'msvs2008' + fun = msvs_generator.fun + numver = '10.00' + vsver = '2008' + + def init(self): + if not getattr(self, 'project_extension', None): + self.project_extension = '_2008.vcproj' + if not getattr(self, 'solution_name', None): + self.solution_name = getattr(Context.g_module, Context.APPNAME, 'project') + '_2008.sln' + + if not getattr(self, 'vsnode_target', None): + self.vsnode_target = wrap_2008(vsnode_target) + if not getattr(self, 'vsnode_build_all', None): + self.vsnode_build_all = wrap_2008(vsnode_build_all) + if not getattr(self, 'vsnode_install_all', None): + self.vsnode_install_all = wrap_2008(vsnode_install_all) + if not getattr(self, 'vsnode_project_view', None): + self.vsnode_project_view = wrap_2008(vsnode_project_view) + + msvs_generator.init(self) + +def options(ctx): + """ + If the msvs option is used, try to 
detect if the build is made from visual studio + """ + ctx.add_option('--execsolution', action='store', help='when building with visual studio, use a build state file') + + old = BuildContext.execute + def override_build_state(ctx): + def lock(rm, add): + uns = ctx.options.execsolution.replace('.sln', rm) + uns = ctx.root.make_node(uns) + try: + uns.delete() + except OSError: + pass + + uns = ctx.options.execsolution.replace('.sln', add) + uns = ctx.root.make_node(uns) + try: + uns.write('') + except EnvironmentError: + pass + + if ctx.options.execsolution: + ctx.launch_dir = Context.top_dir # force a build for the whole project (invalid cwd when called by visual studio) + lock('.lastbuildstate', '.unsuccessfulbuild') + old(ctx) + lock('.unsuccessfulbuild', '.lastbuildstate') + else: + old(ctx) + BuildContext.execute = override_build_state + diff --git a/third_party/waf/waflib/extras/netcache_client.py b/third_party/waf/waflib/extras/netcache_client.py new file mode 100644 index 0000000..dc49048 --- /dev/null +++ b/third_party/waf/waflib/extras/netcache_client.py @@ -0,0 +1,390 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011-2015 (ita) + +""" +A client for the network cache (playground/netcache/). Launch the server with: +./netcache_server, then use it for the builds by adding the following: + + def build(bld): + bld.load('netcache_client') + +The parameters should be present in the environment in the form: + NETCACHE=host:port waf configure build + +Or in a more detailed way: + NETCACHE_PUSH=host:port NETCACHE_PULL=host:port waf configure build + +where: + host: host where the server resides, by default localhost + port: by default push on 11001 and pull on 12001 + +Use the server provided in playground/netcache/Netcache.java +""" + +import os, socket, time, atexit, sys +from waflib import Task, Logs, Utils, Build, Runner +from waflib.Configure import conf + +BUF = 8192 * 16 +HEADER_SIZE = 128 +MODES = ['PUSH', 'PULL', 'PUSH_PULL'] +STALE_TIME = 30 # seconds + +GET = 'GET' +PUT = 'PUT' +LST = 'LST' +BYE = 'BYE' + +all_sigs_in_cache = (0.0, []) + +def put_data(conn, data): + if sys.hexversion > 0x3000000: + data = data.encode('latin-1') + cnt = 0 + while cnt < len(data): + sent = conn.send(data[cnt:]) + if sent == 0: + raise RuntimeError('connection ended') + cnt += sent + +push_connections = Runner.Queue(0) +pull_connections = Runner.Queue(0) +def get_connection(push=False): + # return a new connection... do not forget to release it! 
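+ # idle connections are pooled in the two Runner.Queue objects above;
+ # release_connection() returns them to the pool, close_connection() discards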
+ try: + if push: + ret = push_connections.get(block=False) + else: + ret = pull_connections.get(block=False) + except Exception: + ret = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if push: + ret.connect(Task.push_addr) + else: + ret.connect(Task.pull_addr) + return ret + +def release_connection(conn, msg='', push=False): + if conn: + if push: + push_connections.put(conn) + else: + pull_connections.put(conn) + +def close_connection(conn, msg=''): + if conn: + data = '%s,%s' % (BYE, msg) + try: + put_data(conn, data.ljust(HEADER_SIZE)) + except: + pass + try: + conn.close() + except: + pass + +def close_all(): + for q in (push_connections, pull_connections): + while q.qsize(): + conn = q.get() + try: + close_connection(conn) + except: + # ignore errors when cleaning up + pass +atexit.register(close_all) + +def read_header(conn): + cnt = 0 + buf = [] + while cnt < HEADER_SIZE: + data = conn.recv(HEADER_SIZE - cnt) + if not data: + #import traceback + #traceback.print_stack() + raise ValueError('connection ended when reading a header %r' % buf) + buf.append(data) + cnt += len(data) + if sys.hexversion > 0x3000000: + ret = ''.encode('latin-1').join(buf) + ret = ret.decode('latin-1') + else: + ret = ''.join(buf) + return ret + +def check_cache(conn, ssig): + """ + List the files on the server, this is an optimization because it assumes that + concurrent builds are rare + """ + global all_sigs_in_cache + if not STALE_TIME: + return + if time.time() - all_sigs_in_cache[0] > STALE_TIME: + + params = (LST,'') + put_data(conn, ','.join(params).ljust(HEADER_SIZE)) + + # read what is coming back + ret = read_header(conn) + size = int(ret.split(',')[0]) + + buf = [] + cnt = 0 + while cnt < size: + data = conn.recv(min(BUF, size-cnt)) + if not data: + raise ValueError('connection ended %r %r' % (cnt, size)) + buf.append(data) + cnt += len(data) + + if sys.hexversion > 0x3000000: + ret = ''.encode('latin-1').join(buf) + ret = ret.decode('latin-1') + else: + ret = ''.join(buf) + + all_sigs_in_cache = (time.time(), ret.splitlines()) + Logs.debug('netcache: server cache has %r entries', len(all_sigs_in_cache[1])) + + if not ssig in all_sigs_in_cache[1]: + raise ValueError('no file %s in cache' % ssig) + +class MissingFile(Exception): + pass + +def recv_file(conn, ssig, count, p): + check_cache(conn, ssig) + + params = (GET, ssig, str(count)) + put_data(conn, ','.join(params).ljust(HEADER_SIZE)) + data = read_header(conn) + + size = int(data.split(',')[0]) + + if size == -1: + raise MissingFile('no file %s - %s in cache' % (ssig, count)) + + # get the file, writing immediately + # TODO a tmp file would be better + f = open(p, 'wb') + cnt = 0 + while cnt < size: + data = conn.recv(min(BUF, size-cnt)) + if not data: + raise ValueError('connection ended %r %r' % (cnt, size)) + f.write(data) + cnt += len(data) + f.close() + +def sock_send(conn, ssig, cnt, p): + #print "pushing %r %r %r" % (ssig, cnt, p) + size = os.stat(p).st_size + params = (PUT, ssig, str(cnt), str(size)) + put_data(conn, ','.join(params).ljust(HEADER_SIZE)) + f = open(p, 'rb') + cnt = 0 + while cnt < size: + r = f.read(min(BUF, size-cnt)) + while r: + k = conn.send(r) + if not k: + raise ValueError('connection ended') + cnt += k + r = r[k:] + +def can_retrieve_cache(self): + if not Task.pull_addr: + return False + if not self.outputs: + return False + self.cached = False + + cnt = 0 + sig = self.signature() + ssig = Utils.to_hex(self.uid() + sig) + + conn = None + err = False + try: + try: + conn = get_connection() + for node in 
self.outputs: + p = node.abspath() + recv_file(conn, ssig, cnt, p) + cnt += 1 + except MissingFile as e: + Logs.debug('netcache: file is not in the cache %r', e) + err = True + except Exception as e: + Logs.debug('netcache: could not get the files %r', self.outputs) + if Logs.verbose > 1: + Logs.debug('netcache: exception %r', e) + err = True + + # broken connection? remove this one + close_connection(conn) + conn = None + else: + Logs.debug('netcache: obtained %r from cache', self.outputs) + + finally: + release_connection(conn) + if err: + return False + + self.cached = True + return True + +@Utils.run_once +def put_files_cache(self): + if not Task.push_addr: + return + if not self.outputs: + return + if getattr(self, 'cached', None): + return + + #print "called put_files_cache", id(self) + bld = self.generator.bld + sig = self.signature() + ssig = Utils.to_hex(self.uid() + sig) + + conn = None + cnt = 0 + try: + for node in self.outputs: + # We could re-create the signature of the task with the signature of the outputs + # in practice, this means hashing the output files + # this is unnecessary + try: + if not conn: + conn = get_connection(push=True) + sock_send(conn, ssig, cnt, node.abspath()) + Logs.debug('netcache: sent %r', node) + except Exception as e: + Logs.debug('netcache: could not push the files %r', e) + + # broken connection? remove this one + close_connection(conn) + conn = None + cnt += 1 + finally: + release_connection(conn, push=True) + + bld.task_sigs[self.uid()] = self.cache_sig + +def hash_env_vars(self, env, vars_lst): + # reimplement so that the resulting hash does not depend on local paths + if not env.table: + env = env.parent + if not env: + return Utils.SIG_NIL + + idx = str(id(env)) + str(vars_lst) + try: + cache = self.cache_env + except AttributeError: + cache = self.cache_env = {} + else: + try: + return self.cache_env[idx] + except KeyError: + pass + + v = str([env[a] for a in vars_lst]) + v = v.replace(self.srcnode.abspath().__repr__()[:-1], '') + m = Utils.md5() + m.update(v.encode()) + ret = m.digest() + + Logs.debug('envhash: %r %r', ret, v) + + cache[idx] = ret + + return ret + +def uid(self): + # reimplement so that the signature does not depend on local paths + try: + return self.uid_ + except AttributeError: + m = Utils.md5() + src = self.generator.bld.srcnode + up = m.update + up(self.__class__.__name__.encode()) + for x in self.inputs + self.outputs: + up(x.path_from(src).encode()) + self.uid_ = m.digest() + return self.uid_ + + +def make_cached(cls): + if getattr(cls, 'nocache', None): + return + + m1 = cls.run + def run(self): + if getattr(self, 'nocache', False): + return m1(self) + if self.can_retrieve_cache(): + return 0 + return m1(self) + cls.run = run + + m2 = cls.post_run + def post_run(self): + if getattr(self, 'nocache', False): + return m2(self) + bld = self.generator.bld + ret = m2(self) + if bld.cache_global: + self.put_files_cache() + if hasattr(self, 'chmod'): + for node in self.outputs: + os.chmod(node.abspath(), self.chmod) + return ret + cls.post_run = post_run + +@conf +def setup_netcache(ctx, push_addr, pull_addr): + Task.Task.can_retrieve_cache = can_retrieve_cache + Task.Task.put_files_cache = put_files_cache + Task.Task.uid = uid + Task.push_addr = push_addr + Task.pull_addr = pull_addr + Build.BuildContext.hash_env_vars = hash_env_vars + ctx.cache_global = True + + for x in Task.classes.values(): + make_cached(x) + +def build(bld): + if not 'NETCACHE' in os.environ and not 'NETCACHE_PULL' in os.environ and not 
'NETCACHE_PUSH' in os.environ: + Logs.warn('Setting NETCACHE_PULL=127.0.0.1:11001 and NETCACHE_PUSH=127.0.0.1:12001') + os.environ['NETCACHE_PULL'] = '127.0.0.1:12001' + os.environ['NETCACHE_PUSH'] = '127.0.0.1:11001' + + if 'NETCACHE' in os.environ: + if not 'NETCACHE_PUSH' in os.environ: + os.environ['NETCACHE_PUSH'] = os.environ['NETCACHE'] + if not 'NETCACHE_PULL' in os.environ: + os.environ['NETCACHE_PULL'] = os.environ['NETCACHE'] + + v = os.environ['NETCACHE_PULL'] + if v: + h, p = v.split(':') + pull_addr = (h, int(p)) + else: + pull_addr = None + + v = os.environ['NETCACHE_PUSH'] + if v: + h, p = v.split(':') + push_addr = (h, int(p)) + else: + push_addr = None + + setup_netcache(bld, push_addr, pull_addr) + diff --git a/third_party/waf/waflib/extras/objcopy.py b/third_party/waf/waflib/extras/objcopy.py new file mode 100644 index 0000000..bb7ca6e --- /dev/null +++ b/third_party/waf/waflib/extras/objcopy.py @@ -0,0 +1,53 @@ +#!/usr/bin/python +# Grygoriy Fuchedzhy 2010 + +""" +Support for converting linked targets to ihex, srec or binary files using +objcopy. Use the 'objcopy' feature in conjunction with the 'cc' or 'cxx' +feature. The 'objcopy' feature uses the following attributes: + +objcopy_bfdname Target object format name (eg. ihex, srec, binary). + Defaults to ihex. +objcopy_target File name used for objcopy output. This defaults to the + target name with objcopy_bfdname as extension. +objcopy_install_path Install path for objcopy_target file. Defaults to ${PREFIX}/fw. +objcopy_flags Additional flags passed to objcopy. +""" + +from waflib.Utils import def_attrs +from waflib import Task, Options +from waflib.TaskGen import feature, after_method + +class objcopy(Task.Task): + run_str = '${OBJCOPY} -O ${TARGET_BFDNAME} ${OBJCOPYFLAGS} ${SRC} ${TGT}' + color = 'CYAN' + +@feature('objcopy') +@after_method('apply_link') +def map_objcopy(self): + def_attrs(self, + objcopy_bfdname = 'ihex', + objcopy_target = None, + objcopy_install_path = "${PREFIX}/firmware", + objcopy_flags = '') + + link_output = self.link_task.outputs[0] + if not self.objcopy_target: + self.objcopy_target = link_output.change_ext('.' 
+ self.objcopy_bfdname).name + task = self.create_task('objcopy', src=link_output, tgt=self.path.find_or_declare(self.objcopy_target)) + + task.env.append_unique('TARGET_BFDNAME', self.objcopy_bfdname) + try: + task.env.append_unique('OBJCOPYFLAGS', getattr(self, 'objcopy_flags')) + except AttributeError: + pass + + if self.objcopy_install_path: + self.add_install_files(install_to=self.objcopy_install_path, install_from=task.outputs[0]) + +def configure(ctx): + program_name = 'objcopy' + prefix = getattr(Options.options, 'cross_prefix', None) + if prefix: + program_name = '{}-{}'.format(prefix, program_name) + ctx.find_program(program_name, var='OBJCOPY', mandatory=True) diff --git a/third_party/waf/waflib/extras/ocaml.py b/third_party/waf/waflib/extras/ocaml.py new file mode 100644 index 0000000..7d785c6 --- /dev/null +++ b/third_party/waf/waflib/extras/ocaml.py @@ -0,0 +1,348 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2010 (ita) + +"ocaml support" + +import os, re +from waflib import Utils, Task +from waflib.Logs import error +from waflib.TaskGen import feature, before_method, after_method, extension + +EXT_MLL = ['.mll'] +EXT_MLY = ['.mly'] +EXT_MLI = ['.mli'] +EXT_MLC = ['.c'] +EXT_ML = ['.ml'] + +open_re = re.compile(r'^\s*open\s+([a-zA-Z]+)(;;){0,1}$', re.M) +foo = re.compile(r"""(\(\*)|(\*\))|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^()*"'\\]*)""", re.M) +def filter_comments(txt): + meh = [0] + def repl(m): + if m.group(1): + meh[0] += 1 + elif m.group(2): + meh[0] -= 1 + elif not meh[0]: + return m.group() + return '' + return foo.sub(repl, txt) + +def scan(self): + node = self.inputs[0] + code = filter_comments(node.read()) + + global open_re + names = [] + import_iterator = open_re.finditer(code) + if import_iterator: + for import_match in import_iterator: + names.append(import_match.group(1)) + found_lst = [] + raw_lst = [] + for name in names: + nd = None + for x in self.incpaths: + nd = x.find_resource(name.lower()+'.ml') + if not nd: + nd = x.find_resource(name+'.ml') + if nd: + found_lst.append(nd) + break + else: + raw_lst.append(name) + + return (found_lst, raw_lst) + +native_lst=['native', 'all', 'c_object'] +bytecode_lst=['bytecode', 'all'] + +@feature('ocaml') +def init_ml(self): + Utils.def_attrs(self, + type = 'all', + incpaths_lst = [], + bld_incpaths_lst = [], + mlltasks = [], + mlytasks = [], + mlitasks = [], + native_tasks = [], + bytecode_tasks = [], + linktasks = [], + bytecode_env = None, + native_env = None, + compiled_tasks = [], + includes = '', + uselib = '', + are_deps_set = 0) + +@feature('ocaml') +@after_method('init_ml') +def init_envs_ml(self): + + self.islibrary = getattr(self, 'islibrary', False) + + global native_lst, bytecode_lst + self.native_env = None + if self.type in native_lst: + self.native_env = self.env.derive() + if self.islibrary: + self.native_env['OCALINKFLAGS'] = '-a' + + self.bytecode_env = None + if self.type in bytecode_lst: + self.bytecode_env = self.env.derive() + if self.islibrary: + self.bytecode_env['OCALINKFLAGS'] = '-a' + + if self.type == 'c_object': + self.native_env.append_unique('OCALINKFLAGS_OPT', '-output-obj') + +@feature('ocaml') +@before_method('apply_vars_ml') +@after_method('init_envs_ml') +def apply_incpaths_ml(self): + inc_lst = self.includes.split() + lst = self.incpaths_lst + for dir in inc_lst: + node = self.path.find_dir(dir) + if not node: + error("node not found: " + str(dir)) + continue + if not node in lst: + lst.append(node) + self.bld_incpaths_lst.append(node) + # now the nodes are 
added to self.incpaths_lst + +@feature('ocaml') +@before_method('process_source') +def apply_vars_ml(self): + for i in self.incpaths_lst: + if self.bytecode_env: + app = self.bytecode_env.append_value + app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()]) + + if self.native_env: + app = self.native_env.append_value + app('OCAMLPATH', ['-I', i.bldpath(), '-I', i.srcpath()]) + + varnames = ['INCLUDES', 'OCAMLFLAGS', 'OCALINKFLAGS', 'OCALINKFLAGS_OPT'] + for name in self.uselib.split(): + for vname in varnames: + cnt = self.env[vname+'_'+name] + if cnt: + if self.bytecode_env: + self.bytecode_env.append_value(vname, cnt) + if self.native_env: + self.native_env.append_value(vname, cnt) + +@feature('ocaml') +@after_method('process_source') +def apply_link_ml(self): + + if self.bytecode_env: + ext = self.islibrary and '.cma' or '.run' + + linktask = self.create_task('ocalink') + linktask.bytecode = 1 + linktask.set_outputs(self.path.find_or_declare(self.target + ext)) + linktask.env = self.bytecode_env + self.linktasks.append(linktask) + + if self.native_env: + if self.type == 'c_object': + ext = '.o' + elif self.islibrary: + ext = '.cmxa' + else: + ext = '' + + linktask = self.create_task('ocalinkx') + linktask.set_outputs(self.path.find_or_declare(self.target + ext)) + linktask.env = self.native_env + self.linktasks.append(linktask) + + # we produce a .o file to be used by gcc + self.compiled_tasks.append(linktask) + +@extension(*EXT_MLL) +def mll_hook(self, node): + mll_task = self.create_task('ocamllex', node, node.change_ext('.ml')) + mll_task.env = self.native_env.derive() + self.mlltasks.append(mll_task) + + self.source.append(mll_task.outputs[0]) + +@extension(*EXT_MLY) +def mly_hook(self, node): + mly_task = self.create_task('ocamlyacc', node, [node.change_ext('.ml'), node.change_ext('.mli')]) + mly_task.env = self.native_env.derive() + self.mlytasks.append(mly_task) + self.source.append(mly_task.outputs[0]) + + task = self.create_task('ocamlcmi', mly_task.outputs[1], mly_task.outputs[1].change_ext('.cmi')) + task.env = self.native_env.derive() + +@extension(*EXT_MLI) +def mli_hook(self, node): + task = self.create_task('ocamlcmi', node, node.change_ext('.cmi')) + task.env = self.native_env.derive() + self.mlitasks.append(task) + +@extension(*EXT_MLC) +def mlc_hook(self, node): + task = self.create_task('ocamlcc', node, node.change_ext('.o')) + task.env = self.native_env.derive() + self.compiled_tasks.append(task) + +@extension(*EXT_ML) +def ml_hook(self, node): + if self.native_env: + task = self.create_task('ocamlx', node, node.change_ext('.cmx')) + task.env = self.native_env.derive() + task.incpaths = self.bld_incpaths_lst + self.native_tasks.append(task) + + if self.bytecode_env: + task = self.create_task('ocaml', node, node.change_ext('.cmo')) + task.env = self.bytecode_env.derive() + task.bytecode = 1 + task.incpaths = self.bld_incpaths_lst + self.bytecode_tasks.append(task) + +def compile_may_start(self): + + if not getattr(self, 'flag_deps', ''): + self.flag_deps = 1 + + # the evil part is that we can only compute the dependencies after the + # source files can be read (this means actually producing the source files) + if getattr(self, 'bytecode', ''): + alltasks = self.generator.bytecode_tasks + else: + alltasks = self.generator.native_tasks + + self.signature() # ensure that files are scanned - unfortunately + tree = self.generator.bld + for node in self.inputs: + lst = tree.node_deps[self.uid()] + for depnode in lst: + for t in alltasks: + if t == self: + continue + if 
depnode in t.inputs: + self.set_run_after(t) + + # TODO necessary to get the signature right - for now + delattr(self, 'cache_sig') + self.signature() + + return Task.Task.runnable_status(self) + +class ocamlx(Task.Task): + """native caml compilation""" + color = 'GREEN' + run_str = '${OCAMLOPT} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}' + scan = scan + runnable_status = compile_may_start + +class ocaml(Task.Task): + """bytecode caml compilation""" + color = 'GREEN' + run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLFLAGS} ${OCAMLINCLUDES} -c -o ${TGT} ${SRC}' + scan = scan + runnable_status = compile_may_start + +class ocamlcmi(Task.Task): + """interface generator (the .i files?)""" + color = 'BLUE' + run_str = '${OCAMLC} ${OCAMLPATH} ${OCAMLINCLUDES} -o ${TGT} -c ${SRC}' + before = ['ocamlcc', 'ocaml', 'ocamlcc'] + +class ocamlcc(Task.Task): + """ocaml to c interfaces""" + color = 'GREEN' + run_str = 'cd ${TGT[0].bld_dir()} && ${OCAMLOPT} ${OCAMLFLAGS} ${OCAMLPATH} ${OCAMLINCLUDES} -c ${SRC[0].abspath()}' + +class ocamllex(Task.Task): + """lexical generator""" + color = 'BLUE' + run_str = '${OCAMLLEX} ${SRC} -o ${TGT}' + before = ['ocamlcmi', 'ocaml', 'ocamlcc'] + +class ocamlyacc(Task.Task): + """parser generator""" + color = 'BLUE' + run_str = '${OCAMLYACC} -b ${tsk.base()} ${SRC}' + before = ['ocamlcmi', 'ocaml', 'ocamlcc'] + + def base(self): + node = self.outputs[0] + s = os.path.splitext(node.name)[0] + return node.bld_dir() + os.sep + s + +def link_may_start(self): + + if getattr(self, 'bytecode', 0): + alltasks = self.generator.bytecode_tasks + else: + alltasks = self.generator.native_tasks + + for x in alltasks: + if not x.hasrun: + return Task.ASK_LATER + + if not getattr(self, 'order', ''): + + # now reorder the inputs given the task dependencies + # this part is difficult, we do not have a total order on the tasks + # if the dependencies are wrong, this may not stop + seen = [] + pendant = []+alltasks + while pendant: + task = pendant.pop(0) + if task in seen: + continue + for x in task.run_after: + if not x in seen: + pendant.append(task) + break + else: + seen.append(task) + self.inputs = [x.outputs[0] for x in seen] + self.order = 1 + return Task.Task.runnable_status(self) + +class ocalink(Task.Task): + """bytecode caml link""" + color = 'YELLOW' + run_str = '${OCAMLC} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS} ${SRC}' + runnable_status = link_may_start + after = ['ocaml', 'ocamlcc'] + +class ocalinkx(Task.Task): + """native caml link""" + color = 'YELLOW' + run_str = '${OCAMLOPT} -o ${TGT} ${OCAMLINCLUDES} ${OCALINKFLAGS_OPT} ${SRC}' + runnable_status = link_may_start + after = ['ocamlx', 'ocamlcc'] + +def configure(conf): + opt = conf.find_program('ocamlopt', var='OCAMLOPT', mandatory=False) + occ = conf.find_program('ocamlc', var='OCAMLC', mandatory=False) + if (not opt) or (not occ): + conf.fatal('The objective caml compiler was not found:\ninstall it or make it available in your PATH') + + v = conf.env + v['OCAMLC'] = occ + v['OCAMLOPT'] = opt + v['OCAMLLEX'] = conf.find_program('ocamllex', var='OCAMLLEX', mandatory=False) + v['OCAMLYACC'] = conf.find_program('ocamlyacc', var='OCAMLYACC', mandatory=False) + v['OCAMLFLAGS'] = '' + where = conf.cmd_and_log(conf.env.OCAMLC + ['-where']).strip()+os.sep + v['OCAMLLIB'] = where + v['LIBPATH_OCAML'] = where + v['INCLUDES_OCAML'] = where + v['LIB_OCAML'] = 'camlrun' + diff --git a/third_party/waf/waflib/extras/package.py b/third_party/waf/waflib/extras/package.py new file mode 100644 index 0000000..c06498e --- 
/dev/null +++ b/third_party/waf/waflib/extras/package.py @@ -0,0 +1,76 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2011 + +""" +Obtain packages, unpack them in a location, and add associated uselib variables +(CFLAGS_pkgname, LIBPATH_pkgname, etc). + +The default is to use a Dependencies.txt file in the source directory. + +This is a work in progress. + +Usage: + +def options(opt): + opt.load('package') + +def configure(conf): + conf.load_packages() +""" + +from waflib import Logs +from waflib.Configure import conf + +try: + from urllib import request +except ImportError: + from urllib import urlopen +else: + urlopen = request.urlopen + + +CACHEVAR = 'WAFCACHE_PACKAGE' + +@conf +def get_package_cache_dir(self): + cache = None + if CACHEVAR in self.environ: + cache = self.environ[CACHEVAR] + cache = self.root.make_node(cache) + elif self.env[CACHEVAR]: + cache = self.env[CACHEVAR] + cache = self.root.make_node(cache) + else: + cache = self.srcnode.make_node('.wafcache_package') + cache.mkdir() + return cache + +@conf +def download_archive(self, src, dst): + for x in self.env.PACKAGE_REPO: + url = '/'.join((x, src)) + try: + web = urlopen(url) + try: + if web.getcode() != 200: + continue + except AttributeError: + pass + except Exception: + # on python3 urlopen throws an exception + # python 2.3 does not have getcode and throws an exception to fail + continue + else: + tmp = self.root.make_node(dst) + tmp.write(web.read()) + Logs.warn('Downloaded %s from %s', tmp.abspath(), url) + break + else: + self.fatal('Could not get the package %s' % src) + +@conf +def load_packages(self): + self.get_package_cache_dir() + # read the dependencies, get the archives, .. + diff --git a/third_party/waf/waflib/extras/parallel_debug.py b/third_party/waf/waflib/extras/parallel_debug.py new file mode 100644 index 0000000..4ffec5e --- /dev/null +++ b/third_party/waf/waflib/extras/parallel_debug.py @@ -0,0 +1,462 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2007-2010 (ita) + +""" +Debugging helper for parallel compilation. + +Copy it to your project and load it with:: + + def options(opt): + opt.load('parallel_debug', tooldir='.') + def build(bld): + ... + +The build will then output a file named pdebug.svg in the source directory.
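+ +For example, a profiled rebuild might then be launched as follows (the option names are those registered in options() at the end of this file; the values are only an illustration):: + + ./waf clean build -j4 --dwidth=1000 --dtitle="nightly build"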
+""" + +import re, sys, threading, time, traceback +try: + from Queue import Queue +except: + from queue import Queue +from waflib import Runner, Options, Task, Logs, Errors + +SVG_TEMPLATE = """<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.0//EN" "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd"> +<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" version="1.0" + x="${project.x}" y="${project.y}" width="${project.width}" height="${project.height}" id="svg602" xml:space="preserve"> + +<style type='text/css' media='screen'> + g.over rect { stroke:#FF0000; fill-opacity:0.4 } +</style> + +<script type='text/javascript'><![CDATA[ +var svg = document.getElementsByTagName('svg')[0]; + +svg.addEventListener('mouseover', function(e) { + var g = e.target.parentNode; + var x = document.getElementById('r_' + g.id); + if (x) { + g.setAttribute('class', g.getAttribute('class') + ' over'); + x.setAttribute('class', x.getAttribute('class') + ' over'); + showInfo(e, g.id, e.target.attributes.tooltip.value); + } +}, false); + +svg.addEventListener('mouseout', function(e) { + var g = e.target.parentNode; + var x = document.getElementById('r_' + g.id); + if (x) { + g.setAttribute('class', g.getAttribute('class').replace(' over', '')); + x.setAttribute('class', x.getAttribute('class').replace(' over', '')); + hideInfo(e); + } +}, false); + +function showInfo(evt, txt, details) { +${if project.tooltip} + tooltip = document.getElementById('tooltip'); + + var t = document.getElementById('tooltiptext'); + t.firstChild.data = txt + " " + details; + + var x = evt.clientX + 9; + if (x > 250) { x -= t.getComputedTextLength() + 16; } + var y = evt.clientY + 20; + tooltip.setAttribute("transform", "translate(" + x + "," + y + ")"); + tooltip.setAttributeNS(null, "visibility", "visible"); + + var r = document.getElementById('tooltiprect'); + r.setAttribute('width', t.getComputedTextLength() + 6); +${endif} +} + +function hideInfo(evt) { + var tooltip = document.getElementById('tooltip'); + tooltip.setAttributeNS(null,"visibility","hidden"); +} +]]></script> + +<!-- inkscape requires a big rectangle or it will not export the pictures properly --> +<rect + x='${project.x}' y='${project.y}' width='${project.width}' height='${project.height}' + style="font-size:10;fill:#ffffff;fill-opacity:0.01;fill-rule:evenodd;stroke:#ffffff;"></rect> + +${if project.title} + <text x="${project.title_x}" y="${project.title_y}" + style="font-size:15px; text-anchor:middle; font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans">${project.title}</text> +${endif} + + +${for cls in project.groups} + <g id='${cls.classname}'> + ${for rect in cls.rects} + <rect x='${rect.x}' y='${rect.y}' width='${rect.width}' height='${rect.height}' tooltip='${rect.name}' style="font-size:10;fill:${rect.color};fill-rule:evenodd;stroke:#000000;stroke-width:0.4;" /> + ${endfor} + </g> +${endfor} + +${for info in project.infos} + <g id='r_${info.classname}'> + <rect x='${info.x}' y='${info.y}' width='${info.width}' height='${info.height}' style="font-size:10;fill:${info.color};fill-rule:evenodd;stroke:#000000;stroke-width:0.4;" /> + <text x="${info.text_x}" y="${info.text_y}" + 
style="font-size:12px;font-style:normal;font-weight:normal;fill:#000000;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1;font-family:Bitstream Vera Sans" + >${info.text}</text> + </g> +${endfor} + +${if project.tooltip} + <g transform="translate(0,0)" visibility="hidden" id="tooltip"> + <rect id="tooltiprect" y="-15" x="-3" width="1" height="20" style="stroke:black;fill:#edefc2;stroke-width:1"/> + <text id="tooltiptext" style="font-family:Arial; font-size:12;fill:black;"> </text> + </g> +${endif} + +</svg> +""" + +COMPILE_TEMPLATE = '''def f(project): + lst = [] + def xml_escape(value): + return value.replace("&", "&").replace('"', """).replace("'", "'").replace("<", "<").replace(">", ">") + + %s + return ''.join(lst) +''' +reg_act = re.compile(r"(?P<backslash>\\)|(?P<dollar>\$\$)|(?P<subst>\$\{(?P<code>[^}]*?)\})", re.M) +def compile_template(line): + + extr = [] + def repl(match): + g = match.group + if g('dollar'): + return "$" + elif g('backslash'): + return "\\" + elif g('subst'): + extr.append(g('code')) + return "<<|@|>>" + return None + + line2 = reg_act.sub(repl, line) + params = line2.split('<<|@|>>') + assert(extr) + + + indent = 0 + buf = [] + app = buf.append + + def app(txt): + buf.append(indent * '\t' + txt) + + for x in range(len(extr)): + if params[x]: + app("lst.append(%r)" % params[x]) + + f = extr[x] + if f.startswith(('if', 'for')): + app(f + ':') + indent += 1 + elif f.startswith('py:'): + app(f[3:]) + elif f.startswith(('endif', 'endfor')): + indent -= 1 + elif f.startswith(('else', 'elif')): + indent -= 1 + app(f + ':') + indent += 1 + elif f.startswith('xml:'): + app('lst.append(xml_escape(%s))' % f[4:]) + else: + #app('lst.append((%s) or "cannot find %s")' % (f, f)) + app('lst.append(str(%s))' % f) + + if extr: + if params[-1]: + app("lst.append(%r)" % params[-1]) + + fun = COMPILE_TEMPLATE % "\n\t".join(buf) + # uncomment the following to debug the template + #for i, x in enumerate(fun.splitlines()): + # print i, x + return Task.funex(fun) + +# red #ff4d4d +# green #4da74d +# lila #a751ff + +color2code = { + 'GREEN' : '#4da74d', + 'YELLOW' : '#fefe44', + 'PINK' : '#a751ff', + 'RED' : '#cc1d1d', + 'BLUE' : '#6687bb', + 'CYAN' : '#34e2e2', +} + +mp = {} +info = [] # list of (text,color) + +def map_to_color(name): + if name in mp: + return mp[name] + try: + cls = Task.classes[name] + except KeyError: + return color2code['RED'] + if cls.color in mp: + return mp[cls.color] + if cls.color in color2code: + return color2code[cls.color] + return color2code['RED'] + +def process(self): + m = self.generator.bld.producer + try: + # TODO another place for this? 
+ del self.generator.bld.task_sigs[self.uid()] + except KeyError: + pass + + self.generator.bld.producer.set_running(1, self) + + try: + ret = self.run() + except Exception: + self.err_msg = traceback.format_exc() + self.hasrun = Task.EXCEPTION + + # TODO cleanup + m.error_handler(self) + return + + if ret: + self.err_code = ret + self.hasrun = Task.CRASHED + else: + try: + self.post_run() + except Errors.WafError: + pass + except Exception: + self.err_msg = traceback.format_exc() + self.hasrun = Task.EXCEPTION + else: + self.hasrun = Task.SUCCESS + if self.hasrun != Task.SUCCESS: + m.error_handler(self) + + self.generator.bld.producer.set_running(-1, self) + +Task.Task.process_back = Task.Task.process +Task.Task.process = process + +old_start = Runner.Parallel.start +def do_start(self): + try: + Options.options.dband + except AttributeError: + self.bld.fatal('use def options(opt): opt.load("parallel_debug")!') + + self.taskinfo = Queue() + old_start(self) + if self.dirty: + make_picture(self) +Runner.Parallel.start = do_start + +lock_running = threading.Lock() +def set_running(self, by, tsk): + with lock_running: + try: + cache = self.lock_cache + except AttributeError: + cache = self.lock_cache = {} + + i = 0 + if by > 0: + vals = cache.values() + for i in range(self.numjobs): + if i not in vals: + cache[tsk] = i + break + else: + i = cache[tsk] + del cache[tsk] + + self.taskinfo.put( (i, id(tsk), time.time(), tsk.__class__.__name__, self.processed, self.count, by, ",".join(map(str, tsk.outputs))) ) +Runner.Parallel.set_running = set_running + +def name2class(name): + return name.replace(' ', '_').replace('.', '_') + +def make_picture(producer): + # first, cast the parameters + if not hasattr(producer.bld, 'path'): + return + + tmp = [] + try: + while True: + tup = producer.taskinfo.get(False) + tmp.append(list(tup)) + except: + pass + + try: + ini = float(tmp[0][2]) + except: + return + + if not info: + seen = [] + for x in tmp: + name = x[3] + if not name in seen: + seen.append(name) + else: + continue + + info.append((name, map_to_color(name))) + info.sort(key=lambda x: x[0]) + + thread_count = 0 + acc = [] + for x in tmp: + thread_count += x[6] + acc.append("%d %d %f %r %d %d %d %s" % (x[0], x[1], x[2] - ini, x[3], x[4], x[5], thread_count, x[7])) + + data_node = producer.bld.path.make_node('pdebug.dat') + data_node.write('\n'.join(acc)) + + tmp = [lst[:2] + [float(lst[2]) - ini] + lst[3:] for lst in tmp] + + st = {} + for l in tmp: + if not l[0] in st: + st[l[0]] = len(st.keys()) + tmp = [ [st[lst[0]]] + lst[1:] for lst in tmp ] + THREAD_AMOUNT = len(st.keys()) + + st = {} + for l in tmp: + if not l[1] in st: + st[l[1]] = len(st.keys()) + tmp = [ [lst[0]] + [st[lst[1]]] + lst[2:] for lst in tmp ] + + + BAND = Options.options.dband + + seen = {} + acc = [] + for x in range(len(tmp)): + line = tmp[x] + id = line[1] + + if id in seen: + continue + seen[id] = True + + begin = line[2] + thread_id = line[0] + for y in range(x + 1, len(tmp)): + line = tmp[y] + if line[1] == id: + end = line[2] + #print id, thread_id, begin, end + #acc.append( ( 10*thread_id, 10*(thread_id+1), 10*begin, 10*end ) ) + acc.append( (BAND * begin, BAND*thread_id, BAND*end - BAND*begin, BAND, line[3], line[7]) ) + break + + if Options.options.dmaxtime < 0.1: + gwidth = 1 + for x in tmp: + m = BAND * x[2] + if m > gwidth: + gwidth = m + else: + gwidth = BAND * Options.options.dmaxtime + + ratio = float(Options.options.dwidth) / gwidth + gwidth = Options.options.dwidth + gheight = BAND * (THREAD_AMOUNT + len(info) 
+ 1.5) + + + # simple data model for our template + class tobject(object): + pass + + model = tobject() + model.x = 0 + model.y = 0 + model.width = gwidth + 4 + model.height = gheight + 4 + + model.tooltip = not Options.options.dnotooltip + + model.title = Options.options.dtitle + model.title_x = gwidth / 2 + model.title_y = gheight + - 5 + + groups = {} + for (x, y, w, h, clsname, name) in acc: + try: + groups[clsname].append((x, y, w, h, name)) + except: + groups[clsname] = [(x, y, w, h, name)] + + # groups of rectangles (else js highlighting is slow) + model.groups = [] + for cls in groups: + g = tobject() + model.groups.append(g) + g.classname = name2class(cls) + g.rects = [] + for (x, y, w, h, name) in groups[cls]: + r = tobject() + g.rects.append(r) + r.x = 2 + x * ratio + r.y = 2 + y + r.width = w * ratio + r.height = h + r.name = name + r.color = map_to_color(cls) + + cnt = THREAD_AMOUNT + + # caption + model.infos = [] + for (text, color) in info: + inf = tobject() + model.infos.append(inf) + inf.classname = name2class(text) + inf.x = 2 + BAND + inf.y = 5 + (cnt + 0.5) * BAND + inf.width = BAND/2 + inf.height = BAND/2 + inf.color = color + + inf.text = text + inf.text_x = 2 + 2 * BAND + inf.text_y = 5 + (cnt + 0.5) * BAND + 10 + + cnt += 1 + + # write the file... + template1 = compile_template(SVG_TEMPLATE) + txt = template1(model) + + node = producer.bld.path.make_node('pdebug.svg') + node.write(txt) + Logs.warn('Created the diagram %r', node) + +def options(opt): + opt.add_option('--dtitle', action='store', default='Parallel build representation for %r' % ' '.join(sys.argv), + help='title for the svg diagram', dest='dtitle') + opt.add_option('--dwidth', action='store', type='int', help='diagram width', default=800, dest='dwidth') + opt.add_option('--dtime', action='store', type='float', help='recording interval in seconds', default=0.009, dest='dtime') + opt.add_option('--dband', action='store', type='int', help='band width', default=22, dest='dband') + opt.add_option('--dmaxtime', action='store', type='float', help='maximum time, for drawing fair comparisons', default=0, dest='dmaxtime') + opt.add_option('--dnotooltip', action='store_true', help='disable tooltips', default=False, dest='dnotooltip') + diff --git a/third_party/waf/waflib/extras/pch.py b/third_party/waf/waflib/extras/pch.py new file mode 100644 index 0000000..b44c7a2 --- /dev/null +++ b/third_party/waf/waflib/extras/pch.py @@ -0,0 +1,148 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Alexander Afanasyev (UCLA), 2014 + +""" +Enable precompiled C++ header support (currently only clang++ and g++ are supported) + +To use this tool, wscript should look like: + + def options(opt): + opt.load('pch') + # This will add `--with-pch` configure option. + # Unless --with-pch during configure stage specified, the precompiled header support is disabled + + def configure(conf): + conf.load('pch') + # this will set conf.env.WITH_PCH if --with-pch is specified and the supported compiler is used + # Unless conf.env.WITH_PCH is set, the precompiled header support is disabled + + def build(bld): + bld(features='cxx pch', + target='precompiled-headers', + name='precompiled-headers', + headers='a.h b.h c.h', # headers to pre-compile into `precompiled-headers` + + # Other parameters to compile precompiled headers + # includes=..., + # export_includes=..., + # use=..., + # ... 
+ + # Exported parameters will be propagated even if precompiled headers are disabled + ) + + bld( + target='test', + features='cxx cxxprogram', + source='a.cpp b.cpp d.cpp main.cpp', + use='precompiled-headers', + ) + + # or + + bld( + target='test', + features='pch cxx cxxprogram', + source='a.cpp b.cpp d.cpp main.cpp', + headers='a.h b.h c.h', + ) + +Note that precompiled header must have multiple inclusion guards. If the guards are missing, any benefit of precompiled header will be voided and compilation may fail in some cases. +""" + +import os +from waflib import Task, TaskGen, Utils +from waflib.Tools import c_preproc, cxx + + +PCH_COMPILER_OPTIONS = { + 'clang++': [['-include'], '.pch', ['-x', 'c++-header']], + 'g++': [['-include'], '.gch', ['-x', 'c++-header']], +} + + +def options(opt): + opt.add_option('--without-pch', action='store_false', default=True, dest='with_pch', help='''Try to use precompiled header to speed up compilation (only g++ and clang++)''') + +def configure(conf): + if (conf.options.with_pch and conf.env['COMPILER_CXX'] in PCH_COMPILER_OPTIONS.keys()): + conf.env.WITH_PCH = True + flags = PCH_COMPILER_OPTIONS[conf.env['COMPILER_CXX']] + conf.env.CXXPCH_F = flags[0] + conf.env.CXXPCH_EXT = flags[1] + conf.env.CXXPCH_FLAGS = flags[2] + + +@TaskGen.feature('pch') +@TaskGen.before('process_source') +def apply_pch(self): + if not self.env.WITH_PCH: + return + + if getattr(self.bld, 'pch_tasks', None) is None: + self.bld.pch_tasks = {} + + if getattr(self, 'headers', None) is None: + return + + self.headers = self.to_nodes(self.headers) + + if getattr(self, 'name', None): + try: + task = self.bld.pch_tasks[self.name] + self.bld.fatal("Duplicated 'pch' task with name %r" % "%s.%s" % (self.name, self.idx)) + except KeyError: + pass + + out = '%s.%d%s' % (self.target, self.idx, self.env['CXXPCH_EXT']) + out = self.path.find_or_declare(out) + task = self.create_task('gchx', self.headers, out) + + # target should be an absolute path of `out`, but without precompiled header extension + task.target = out.abspath()[:-len(out.suffix())] + + self.pch_task = task + if getattr(self, 'name', None): + self.bld.pch_tasks[self.name] = task + +@TaskGen.feature('cxx') +@TaskGen.after_method('process_source', 'propagate_uselib_vars') +def add_pch(self): + if not (self.env['WITH_PCH'] and getattr(self, 'use', None) and getattr(self, 'compiled_tasks', None) and getattr(self.bld, 'pch_tasks', None)): + return + + pch = None + # find pch task, if any + + if getattr(self, 'pch_task', None): + pch = self.pch_task + else: + for use in Utils.to_list(self.use): + try: + pch = self.bld.pch_tasks[use] + except KeyError: + pass + + if pch: + for x in self.compiled_tasks: + x.env.append_value('CXXFLAGS', self.env['CXXPCH_F'] + [pch.target]) + +class gchx(Task.Task): + run_str = '${CXX} ${ARCH_ST:ARCH} ${CXXFLAGS} ${CXXPCH_FLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${CXXPCH_F:SRC} ${CXX_SRC_F}${SRC[0].abspath()} ${CXX_TGT_F}${TGT[0].abspath()} ${CPPFLAGS}' + scan = c_preproc.scan + color = 'BLUE' + ext_out=['.h'] + + def runnable_status(self): + try: + node_deps = self.generator.bld.node_deps[self.uid()] + except KeyError: + node_deps = [] + ret = Task.Task.runnable_status(self) + if ret == Task.SKIP_ME and self.env.CXX_NAME == 'clang': + t = os.stat(self.outputs[0].abspath()).st_mtime + for n in self.inputs + node_deps: + if os.stat(n.abspath()).st_mtime > t: + return Task.RUN_ME + return ret diff --git a/third_party/waf/waflib/extras/pep8.py 
b/third_party/waf/waflib/extras/pep8.py new file mode 100644 index 0000000..676beed --- /dev/null +++ b/third_party/waf/waflib/extras/pep8.py @@ -0,0 +1,106 @@ +#! /usr/bin/env python +# encoding: utf-8 +# +# written by Sylvain Rouquette, 2011 + +''' +Install pep8 module: +$ easy_install pep8 + or +$ pip install pep8 + +To add the pep8 tool to the waf file: +$ ./waf-light --tools=compat15,pep8 + or, if you have waf >= 1.6.2 +$ ./waf update --files=pep8 + + +Then add this to your wscript: + +[at]extension('.py', 'wscript') +def run_pep8(self, node): + self.create_task('Pep8', node) + +''' + +import threading +from waflib import Task, Options + +pep8 = __import__('pep8') + + +class Pep8(Task.Task): + color = 'PINK' + lock = threading.Lock() + + def check_options(self): + if pep8.options: + return + pep8.options = Options.options + pep8.options.prog = 'pep8' + excl = pep8.options.exclude.split(',') + pep8.options.exclude = [s.rstrip('/') for s in excl] + if pep8.options.filename: + pep8.options.filename = pep8.options.filename.split(',') + if pep8.options.select: + pep8.options.select = pep8.options.select.split(',') + else: + pep8.options.select = [] + if pep8.options.ignore: + pep8.options.ignore = pep8.options.ignore.split(',') + elif pep8.options.select: + # Ignore all checks which are not explicitly selected + pep8.options.ignore = [''] + elif pep8.options.testsuite or pep8.options.doctest: + # For doctest and testsuite, all checks are required + pep8.options.ignore = [] + else: + # The default choice: ignore controversial checks + pep8.options.ignore = pep8.DEFAULT_IGNORE.split(',') + pep8.options.physical_checks = pep8.find_checks('physical_line') + pep8.options.logical_checks = pep8.find_checks('logical_line') + pep8.options.counters = dict.fromkeys(pep8.BENCHMARK_KEYS, 0) + pep8.options.messages = {} + + def run(self): + with Pep8.lock: + self.check_options() + pep8.input_file(self.inputs[0].abspath()) + return 0 if not pep8.get_count() else -1 + + +def options(opt): + opt.add_option('-q', '--quiet', default=0, action='count', + help="report only file names, or nothing with -qq") + opt.add_option('-r', '--repeat', action='store_true', + help="show all occurrences of the same error") + opt.add_option('--exclude', metavar='patterns', + default=pep8.DEFAULT_EXCLUDE, + help="exclude files or directories which match these " + "comma separated patterns (default: %s)" % + pep8.DEFAULT_EXCLUDE, + dest='exclude') + opt.add_option('--filename', metavar='patterns', default='*.py', + help="when parsing directories, only check filenames " + "matching these comma separated patterns (default: " + "*.py)") + opt.add_option('--select', metavar='errors', default='', + help="select errors and warnings (e.g. E,W6)") + opt.add_option('--ignore', metavar='errors', default='', + help="skip errors and warnings (e.g. 
E4,W)") + opt.add_option('--show-source', action='store_true', + help="show source code for each error") + opt.add_option('--show-pep8', action='store_true', + help="show text of PEP 8 for each error") + opt.add_option('--statistics', action='store_true', + help="count errors and warnings") + opt.add_option('--count', action='store_true', + help="print total number of errors and warnings " + "to standard error and set exit code to 1 if " + "total is not null") + opt.add_option('--benchmark', action='store_true', + help="measure processing speed") + opt.add_option('--testsuite', metavar='dir', + help="run regression tests from dir") + opt.add_option('--doctest', action='store_true', + help="run doctest on myself") diff --git a/third_party/waf/waflib/extras/pgicc.py b/third_party/waf/waflib/extras/pgicc.py new file mode 100644 index 0000000..f8068d5 --- /dev/null +++ b/third_party/waf/waflib/extras/pgicc.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Antoine Dechaume 2011 + +""" +Detect the PGI C compiler +""" + +import sys, re +from waflib import Errors +from waflib.Configure import conf +from waflib.Tools.compiler_c import c_compiler +c_compiler['linux'].append('pgicc') + +@conf +def find_pgi_compiler(conf, var, name): + """ + Find the program name, and execute it to ensure it really is itself. + """ + if sys.platform == 'cygwin': + conf.fatal('The PGI compiler does not work on Cygwin') + + v = conf.env + cc = None + if v[var]: + cc = v[var] + elif var in conf.environ: + cc = conf.environ[var] + if not cc: + cc = conf.find_program(name, var=var) + if not cc: + conf.fatal('PGI Compiler (%s) was not found' % name) + + v[var + '_VERSION'] = conf.get_pgi_version(cc) + v[var] = cc + v[var + '_NAME'] = 'pgi' + +@conf +def get_pgi_version(conf, cc): + """Find the version of a pgi compiler.""" + version_re = re.compile(r"The Portland Group", re.I).search + cmd = cc + ['-V', '-E'] # Issue 1078, prevent wrappers from linking + + try: + out, err = conf.cmd_and_log(cmd, output=0) + except Errors.WafError: + conf.fatal('Could not find pgi compiler %r' % cmd) + + if out: + match = version_re(out) + else: + match = version_re(err) + + if not match: + conf.fatal('Could not verify PGI signature') + + cmd = cc + ['-help=variable'] + try: + out, err = conf.cmd_and_log(cmd, output=0) + except Errors.WafError: + conf.fatal('Could not find pgi compiler %r' % cmd) + + version = re.findall(r'^COMPVER\s*=(.*)', out, re.M) + if len(version) != 1: + conf.fatal('Could not determine the compiler version') + return version[0] + +def configure(conf): + conf.find_pgi_compiler('CC', 'pgcc') + conf.find_ar() + conf.gcc_common_flags() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() + diff --git a/third_party/waf/waflib/extras/pgicxx.py b/third_party/waf/waflib/extras/pgicxx.py new file mode 100644 index 0000000..eae121c --- /dev/null +++ b/third_party/waf/waflib/extras/pgicxx.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Antoine Dechaume 2011 + +""" +Detect the PGI C++ compiler +""" + +from waflib.Tools.compiler_cxx import cxx_compiler +cxx_compiler['linux'].append('pgicxx') + +from waflib.extras import pgicc + +def configure(conf): + conf.find_pgi_compiler('CXX', 'pgCC') + conf.find_ar() + conf.gxx_common_flags() + conf.cxx_load_tools() + conf.cxx_add_flags() + conf.link_add_flags() diff --git a/third_party/waf/waflib/extras/proc.py b/third_party/waf/waflib/extras/proc.py new file mode 100644 index 0000000..764abec --- /dev/null +++ 
b/third_party/waf/waflib/extras/proc.py @@ -0,0 +1,54 @@ +#! /usr/bin/env python +# per rosengren 2011 + +from os import environ, path +from waflib import TaskGen, Utils + +def options(opt): + grp = opt.add_option_group('Oracle ProC Options') + grp.add_option('--oracle_home', action='store', default=environ.get('PROC_ORACLE'), help='Path to Oracle installation home (has bin/lib)') + grp.add_option('--tns_admin', action='store', default=environ.get('TNS_ADMIN'), help='Directory containing server list (TNS_NAMES.ORA)') + grp.add_option('--connection', action='store', default='dummy-user/dummy-password@dummy-server', help='Format: user/password@server') + +def configure(cnf): + env = cnf.env + if not env.PROC_ORACLE: + env.PROC_ORACLE = cnf.options.oracle_home + if not env.PROC_TNS_ADMIN: + env.PROC_TNS_ADMIN = cnf.options.tns_admin + if not env.PROC_CONNECTION: + env.PROC_CONNECTION = cnf.options.connection + cnf.find_program('proc', var='PROC', path_list=env.PROC_ORACLE + path.sep + 'bin') + +def proc(tsk): + env = tsk.env + gen = tsk.generator + inc_nodes = gen.to_incnodes(Utils.to_list(getattr(gen,'includes',[])) + env['INCLUDES']) + + cmd = ( + [env.PROC] + + ['SQLCHECK=SEMANTICS'] + + (['SYS_INCLUDE=(' + ','.join(env.PROC_INCLUDES) + ')'] + if env.PROC_INCLUDES else []) + + ['INCLUDE=(' + ','.join( + [i.bldpath() for i in inc_nodes] + ) + ')'] + + ['userid=' + env.PROC_CONNECTION] + + ['INAME=' + tsk.inputs[0].bldpath()] + + ['ONAME=' + tsk.outputs[0].bldpath()] + ) + exec_env = { + 'ORACLE_HOME': env.PROC_ORACLE, + 'LD_LIBRARY_PATH': env.PROC_ORACLE + path.sep + 'lib', + } + if env.PROC_TNS_ADMIN: + exec_env['TNS_ADMIN'] = env.PROC_TNS_ADMIN + return tsk.exec_command(cmd, env=exec_env) + +TaskGen.declare_chain( + name = 'proc', + rule = proc, + ext_in = '.pc', + ext_out = '.c', +) + diff --git a/third_party/waf/waflib/extras/protoc.py b/third_party/waf/waflib/extras/protoc.py new file mode 100644 index 0000000..4a519cc --- /dev/null +++ b/third_party/waf/waflib/extras/protoc.py @@ -0,0 +1,224 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Philipp Bender, 2012 +# Matt Clarkson, 2012 + +import re, os +from waflib.Task import Task +from waflib.TaskGen import extension +from waflib import Errors, Context, Logs + +""" +A simple tool to integrate protocol buffers into your build system. + +Example for C++: + + def configure(conf): + conf.load('compiler_cxx cxx protoc') + + def build(bld): + bld( + features = 'cxx cxxprogram' + source = 'main.cpp file1.proto proto/file2.proto', + includes = '. 
proto', + target = 'executable') + +Example for Python: + + def configure(conf): + conf.load('python protoc') + + def build(bld): + bld( + features = 'py' + source = 'main.py file1.proto proto/file2.proto', + protoc_includes = 'proto') + +Example for both Python and C++ at same time: + + def configure(conf): + conf.load('cxx python protoc') + + def build(bld): + bld( + features = 'cxx py' + source = 'file1.proto proto/file2.proto', + protoc_includes = 'proto') # or includes + + +Example for Java: + + def options(opt): + opt.load('java') + + def configure(conf): + conf.load('python java protoc') + # Here you have to point to your protobuf-java JAR and have it in classpath + conf.env.CLASSPATH_PROTOBUF = ['protobuf-java-2.5.0.jar'] + + def build(bld): + bld( + features = 'javac protoc', + name = 'pbjava', + srcdir = 'inc/ src', # directories used by javac + source = ['inc/message_inc.proto', 'inc/message.proto'], + # source is used by protoc for .proto files + use = 'PROTOBUF', + protoc_includes = ['inc']) # for protoc to search dependencies + + +Protoc includes passed via protoc_includes are either relative to the taskgen +or to the project and are searched in this order. + +Include directories external to the waf project can also be passed to the +extra by using protoc_extincludes + + protoc_extincludes = ['/usr/include/pblib'] + + +Notes when using this tool: + +- protoc command line parsing is tricky. + + The generated files can be put in subfolders which depend on + the order of the include paths. + + Try to be simple when creating task generators + containing protoc stuff. + +""" + +class protoc(Task): + run_str = '${PROTOC} ${PROTOC_FL:PROTOC_FLAGS} ${PROTOC_ST:INCPATHS} ${PROTOC_ST:PROTOC_INCPATHS} ${PROTOC_ST:PROTOC_EXTINCPATHS} ${SRC[0].bldpath()}' + color = 'BLUE' + ext_out = ['.h', 'pb.cc', '.py', '.java'] + def scan(self): + """ + Scan .proto dependencies + """ + node = self.inputs[0] + + nodes = [] + names = [] + seen = [] + search_nodes = [] + + if not node: + return (nodes, names) + + if 'cxx' in self.generator.features: + search_nodes = self.generator.includes_nodes + + if 'py' in self.generator.features or 'javac' in self.generator.features: + for incpath in getattr(self.generator, 'protoc_includes', []): + incpath_node = self.generator.path.find_node(incpath) + if incpath_node: + search_nodes.append(incpath_node) + else: + # Check if relative to top-level for extra tg dependencies + incpath_node = self.generator.bld.path.find_node(incpath) + if incpath_node: + search_nodes.append(incpath_node) + else: + raise Errors.WafError('protoc: include path %r does not exist' % incpath) + + + def parse_node(node): + if node in seen: + return + seen.append(node) + code = node.read().splitlines() + for line in code: + m = re.search(r'^import\s+"(.*)";.*(//)?.*', line) + if m: + dep = m.groups()[0] + for incnode in search_nodes: + found = incnode.find_resource(dep) + if found: + nodes.append(found) + parse_node(found) + else: + names.append(dep) + + parse_node(node) + # Add also dependencies path to INCPATHS so protoc will find the included file + for deppath in nodes: + self.env.append_unique('INCPATHS', deppath.parent.bldpath()) + return (nodes, names) + +@extension('.proto') +def process_protoc(self, node): + incdirs = [] + out_nodes = [] + protoc_flags = [] + + # ensure PROTOC_FLAGS is a list; a copy is used below anyway + self.env.PROTOC_FLAGS = self.to_list(self.env.PROTOC_FLAGS) + + if 'cxx' in self.features: + cpp_node = node.change_ext('.pb.cc') + hpp_node = 
node.change_ext('.pb.h') + self.source.append(cpp_node) + out_nodes.append(cpp_node) + out_nodes.append(hpp_node) + protoc_flags.append('--cpp_out=%s' % node.parent.get_bld().bldpath()) + + if 'py' in self.features: + py_node = node.change_ext('_pb2.py') + self.source.append(py_node) + out_nodes.append(py_node) + protoc_flags.append('--python_out=%s' % node.parent.get_bld().bldpath()) + + if 'javac' in self.features: + # Make javac get also pick java code generated in build + if not node.parent.get_bld() in self.javac_task.srcdir: + self.javac_task.srcdir.append(node.parent.get_bld()) + + protoc_flags.append('--java_out=%s' % node.parent.get_bld().bldpath()) + node.parent.get_bld().mkdir() + + tsk = self.create_task('protoc', node, out_nodes) + tsk.env.append_value('PROTOC_FLAGS', protoc_flags) + + if 'javac' in self.features: + self.javac_task.set_run_after(tsk) + + # Instruct protoc where to search for .proto included files. + # For C++ standard include files dirs are used, + # but this doesn't apply to Python for example + for incpath in getattr(self, 'protoc_includes', []): + incpath_node = self.path.find_node(incpath) + if incpath_node: + incdirs.append(incpath_node.bldpath()) + else: + # Check if relative to top-level for extra tg dependencies + incpath_node = self.bld.path.find_node(incpath) + if incpath_node: + incdirs.append(incpath_node.bldpath()) + else: + raise Errors.WafError('protoc: include path %r does not exist' % incpath) + + tsk.env.PROTOC_INCPATHS = incdirs + + # Include paths external to the waf project (ie. shared pb repositories) + tsk.env.PROTOC_EXTINCPATHS = getattr(self, 'protoc_extincludes', []) + + # PR2115: protoc generates output of .proto files in nested + # directories by canonicalizing paths. To avoid this we have to pass + # as first include the full directory file of the .proto file + tsk.env.prepend_value('INCPATHS', node.parent.bldpath()) + + use = getattr(self, 'use', '') + if not 'PROTOBUF' in use: + self.use = self.to_list(use) + ['PROTOBUF'] + +def configure(conf): + conf.check_cfg(package='protobuf', uselib_store='PROTOBUF', args=['--cflags', '--libs']) + conf.find_program('protoc', var='PROTOC') + conf.start_msg('Checking for protoc version') + protocver = conf.cmd_and_log(conf.env.PROTOC + ['--version'], output=Context.BOTH) + protocver = ''.join(protocver).strip()[protocver[0].rfind(' ')+1:] + conf.end_msg(protocver) + conf.env.PROTOC_MAJOR = protocver[:protocver.find('.')] + conf.env.PROTOC_ST = '-I%s' + conf.env.PROTOC_FL = '%s' diff --git a/third_party/waf/waflib/extras/pyqt5.py b/third_party/waf/waflib/extras/pyqt5.py new file mode 100644 index 0000000..0c083a1 --- /dev/null +++ b/third_party/waf/waflib/extras/pyqt5.py @@ -0,0 +1,246 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Federico Pellegrin, 2016-2022 (fedepell) adapted for Python + +""" +This tool helps with finding Python Qt5 tools and libraries, +and provides translation from QT5 files to Python code. + +The following snippet illustrates the tool usage:: + + def options(opt): + opt.load('py pyqt5') + + def configure(conf): + conf.load('py pyqt5') + + def build(bld): + bld( + features = 'py pyqt5', + source = 'main.py textures.qrc aboutDialog.ui', + ) + +Here, the UI description and resource files will be processed +to generate code. + +Usage +===== + +Load the "pyqt5" tool. 
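+ +One binding flavour can be forced at configure time with the options registered at the end of this file, for example:: + + ./waf configure --pyqt5-pyside2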
+ +Add into the sources list also the qrc resources files or ui5 +definition files and they will be translated into python code +with the system tools (PyQt5, PySide2, PyQt4 are searched in this +order) and then compiled +""" + +try: + from xml.sax import make_parser + from xml.sax.handler import ContentHandler +except ImportError: + has_xml = False + ContentHandler = object +else: + has_xml = True + +import os +from waflib.Tools import python +from waflib import Task, Options +from waflib.TaskGen import feature, extension +from waflib.Configure import conf +from waflib import Logs + +EXT_RCC = ['.qrc'] +""" +File extension for the resource (.qrc) files +""" + +EXT_UI = ['.ui'] +""" +File extension for the user interface (.ui) files +""" + + +class XMLHandler(ContentHandler): + """ + Parses ``.qrc`` files + """ + def __init__(self): + self.buf = [] + self.files = [] + def startElement(self, name, attrs): + if name == 'file': + self.buf = [] + def endElement(self, name): + if name == 'file': + self.files.append(str(''.join(self.buf))) + def characters(self, cars): + self.buf.append(cars) + +@extension(*EXT_RCC) +def create_pyrcc_task(self, node): + "Creates rcc and py task for ``.qrc`` files" + rcnode = node.change_ext('.py') + self.create_task('pyrcc', node, rcnode) + if getattr(self, 'install_from', None): + self.install_from = self.install_from.get_bld() + else: + self.install_from = self.path.get_bld() + self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') + self.process_py(rcnode) + +@extension(*EXT_UI) +def create_pyuic_task(self, node): + "Create uic tasks and py for user interface ``.ui`` definition files" + uinode = node.change_ext('.py') + self.create_task('ui5py', node, uinode) + if getattr(self, 'install_from', None): + self.install_from = self.install_from.get_bld() + else: + self.install_from = self.path.get_bld() + self.install_path = getattr(self, 'install_path', '${PYTHONDIR}') + self.process_py(uinode) + +@extension('.ts') +def add_pylang(self, node): + """Adds all the .ts file into ``self.lang``""" + self.lang = self.to_list(getattr(self, 'lang', [])) + [node] + +@feature('pyqt5') +def apply_pyqt5(self): + """ + The additional parameters are: + + :param lang: list of translation files (\\*.ts) to process + :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension + :param langname: if given, transform the \\*.ts files into a .qrc files to include in the binary file + :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension + """ + if getattr(self, 'lang', None): + qmtasks = [] + for x in self.to_list(self.lang): + if isinstance(x, str): + x = self.path.find_resource(x + '.ts') + qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm'))) + + + if getattr(self, 'langname', None): + qmnodes = [k.outputs[0] for k in qmtasks] + rcnode = self.langname + if isinstance(rcnode, str): + rcnode = self.path.find_or_declare(rcnode + '.qrc') + t = self.create_task('qm2rcc', qmnodes, rcnode) + create_pyrcc_task(self, t.outputs[0]) + +class pyrcc(Task.Task): + """ + Processes ``.qrc`` files + """ + color = 'BLUE' + run_str = '${QT_PYRCC} ${QT_PYRCC_FLAGS} ${SRC} -o ${TGT}' + ext_out = ['.py'] + + def rcname(self): + return os.path.splitext(self.inputs[0].name)[0] + + def scan(self): + """Parse the *.qrc* files""" + if not has_xml: + Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') + return ([], []) + + parser = make_parser() + curHandler = XMLHandler() + 
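+ # run the .qrc file through the SAX parser; XMLHandler (above) collects the <file> entries so they can be returned as dependencies +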
parser.setContentHandler(curHandler) + fi = open(self.inputs[0].abspath(), 'r') + try: + parser.parse(fi) + finally: + fi.close() + + nodes = [] + names = [] + root = self.inputs[0].parent + for x in curHandler.files: + nd = root.find_resource(x) + if nd: + nodes.append(nd) + else: + names.append(x) + return (nodes, names) + + +class ui5py(Task.Task): + """ + Processes ``.ui`` files for python + """ + color = 'BLUE' + run_str = '${QT_PYUIC} ${QT_PYUIC_FLAGS} ${SRC} -o ${TGT}' + ext_out = ['.py'] + +class ts2qm(Task.Task): + """ + Generates ``.qm`` files from ``.ts`` files + """ + color = 'BLUE' + run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' + +class qm2rcc(Task.Task): + """ + Generates ``.qrc`` files from ``.qm`` files + """ + color = 'BLUE' + after = 'ts2qm' + def run(self): + """Create a qrc file including the inputs""" + txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs]) + code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt + self.outputs[0].write(code) + +def configure(self): + self.find_pyqt5_binaries() + + # warn about this during the configuration too + if not has_xml: + Logs.error('No xml.sax support was found, rcc dependencies will be incomplete!') + +@conf +def find_pyqt5_binaries(self): + """ + Detects PyQt5 or PySide2 programs such as pyuic5/pyside2-uic, pyrcc5/pyside2-rcc + """ + env = self.env + + if getattr(Options.options, 'want_pyqt5', True): + self.find_program(['pyuic5'], var='QT_PYUIC') + self.find_program(['pyrcc5'], var='QT_PYRCC') + self.find_program(['pylupdate5'], var='QT_PYLUPDATE') + elif getattr(Options.options, 'want_pyside2', True): + self.find_program(['pyside2-uic','uic-qt5'], var='QT_PYUIC') + self.find_program(['pyside2-rcc','rcc-qt5'], var='QT_PYRCC') + self.find_program(['pyside2-lupdate','lupdate-qt5'], var='QT_PYLUPDATE') + elif getattr(Options.options, 'want_pyqt4', True): + self.find_program(['pyuic4'], var='QT_PYUIC') + self.find_program(['pyrcc4'], var='QT_PYRCC') + self.find_program(['pylupdate4'], var='QT_PYLUPDATE') + else: + self.find_program(['pyuic5','pyside2-uic','pyuic4','uic-qt5'], var='QT_PYUIC') + self.find_program(['pyrcc5','pyside2-rcc','pyrcc4','rcc-qt5'], var='QT_PYRCC') + self.find_program(['pylupdate5', 'pyside2-lupdate','pylupdate4','lupdate-qt5'], var='QT_PYLUPDATE') + + if not env.QT_PYUIC: + self.fatal('cannot find the uic compiler for python for qt5') + + if not env.QT_PYRCC: + self.fatal('cannot find the rcc compiler for python for qt5') + + self.find_program(['lrelease-qt5', 'lrelease'], var='QT_LRELEASE') + +def options(opt): + """ + Command-line options + """ + pyqt5opt=opt.add_option_group("Python QT5 Options") + pyqt5opt.add_option('--pyqt5-pyqt5', action='store_true', default=False, dest='want_pyqt5', help='use PyQt5 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') + pyqt5opt.add_option('--pyqt5-pyside2', action='store_true', default=False, dest='want_pyside2', help='use PySide2 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') + pyqt5opt.add_option('--pyqt5-pyqt4', action='store_true', default=False, dest='want_pyqt4', help='use PyQt4 bindings as python QT5 bindings (default PyQt5 is searched first, PySide2 after, PyQt4 last)') diff --git a/third_party/waf/waflib/extras/pytest.py b/third_party/waf/waflib/extras/pytest.py new file mode 100644 index 0000000..fc9ad1c --- /dev/null +++ b/third_party/waf/waflib/extras/pytest.py @@ 
-0,0 +1,240 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Calle Rosenquist, 2016-2018 (xbreak) + +""" +Provides Python unit test support using :py:class:`waflib.Tools.waf_unit_test.utest` +task via the **pytest** feature. + +To use pytest the following is needed: + +1. Load `pytest` and the dependency `waf_unit_test` tools. +2. Create a task generator with feature `pytest` (not `test`) and customize behaviour with + the following attributes: + + - `pytest_source`: Test input files. + - `ut_str`: Test runner command, e.g. ``${PYTHON} -B -m unittest discover`` or + if nose is used: ``${NOSETESTS} --no-byte-compile ${SRC}``. + - `ut_shell`: Determines if ``ut_str`` is executed in a shell. Default: False. + - `ut_cwd`: Working directory for test runner. Defaults to directory of + first ``pytest_source`` file. + + Additionally the following `pytest` specific attributes are used in dependent taskgens: + + - `pytest_path`: Node or string list of additional Python paths. + - `pytest_libpath`: Node or string list of additional library paths. + +The `use` dependencies are used for both update calculation and to populate +the following environment variables for the `pytest` test runner: + +1. `PYTHONPATH` (`sys.path`) of any dependent taskgen that has the feature `py`: + + - `install_from` attribute is used to determine where the root of the Python sources + are located. If `install_from` is not specified the default is to use the taskgen path + as the root. + + - `pytest_path` attribute is used to manually specify additional Python paths. + +2. Dynamic linker search path variable (e.g. `LD_LIBRARY_PATH`) of any dependent taskgen with + non-static link_task. + + - `pytest_libpath` attribute is used to manually specify additional linker paths. + +3. Java class search path (CLASSPATH) of any Java/Javalike dependency + +Note: `pytest` cannot automatically determine the correct `PYTHONPATH` for `pyext` taskgens + because the extension might be part of a Python package or used standalone: + + - When used as part of another `py` package, the `PYTHONPATH` is provided by + that taskgen so no additional action is required. + + - When used as a standalone module, the user needs to specify the `PYTHONPATH` explicitly + via the `pytest_path` attribute on the `pyext` taskgen. + + For details c.f. the pytest playground examples. + + +For example:: + + # A standalone Python C extension that demonstrates unit test environment population + # of PYTHONPATH and LD_LIBRARY_PATH/PATH/DYLD_LIBRARY_PATH. + # + # Note: `pytest_path` is provided here because pytest cannot automatically determine + # if the extension is part of another Python package or is used standalone. + bld(name = 'foo_ext', + features = 'c cshlib pyext', + source = 'src/foo_ext.c', + target = 'foo_ext', + pytest_path = [ bld.path.get_bld() ]) + + # Python package under test that also depend on the Python module `foo_ext` + # + # Note: `install_from` is added automatically to `PYTHONPATH`. + bld(name = 'foo', + features = 'py', + use = 'foo_ext', + source = bld.path.ant_glob('src/foo/*.py'), + install_from = 'src') + + # Unit test example using the built in module unittest and let that discover + # any test cases. 
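+ # 'use' is resolved recursively, so 'foo' also brings in 'foo_ext' here (see _process_use_rec below).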
+ bld(name = 'foo_test', + features = 'pytest', + use = 'foo', + pytest_source = bld.path.ant_glob('test/*.py'), + ut_str = '${PYTHON} -B -m unittest discover') + +""" + +import os +from waflib import Task, TaskGen, Errors, Utils, Logs +from waflib.Tools import ccroot + +def _process_use_rec(self, name): + """ + Recursively process ``use`` for task generator with name ``name``.. + Used by pytest_process_use. + """ + if name in self.pytest_use_not or name in self.pytest_use_seen: + return + try: + tg = self.bld.get_tgen_by_name(name) + except Errors.WafError: + self.pytest_use_not.add(name) + return + + self.pytest_use_seen.append(name) + tg.post() + + for n in self.to_list(getattr(tg, 'use', [])): + _process_use_rec(self, n) + + +@TaskGen.feature('pytest') +@TaskGen.after_method('process_source', 'apply_link') +def pytest_process_use(self): + """ + Process the ``use`` attribute which contains a list of task generator names and store + paths that later is used to populate the unit test runtime environment. + """ + self.pytest_use_not = set() + self.pytest_use_seen = [] + self.pytest_paths = [] # strings or Nodes + self.pytest_libpaths = [] # strings or Nodes + self.pytest_javapaths = [] # strings or Nodes + self.pytest_dep_nodes = [] + + names = self.to_list(getattr(self, 'use', [])) + for name in names: + _process_use_rec(self, name) + + def extend_unique(lst, varlst): + ext = [] + for x in varlst: + if x not in lst: + ext.append(x) + lst.extend(ext) + + # Collect type specific info needed to construct a valid runtime environment + # for the test. + for name in self.pytest_use_seen: + tg = self.bld.get_tgen_by_name(name) + + extend_unique(self.pytest_paths, Utils.to_list(getattr(tg, 'pytest_path', []))) + extend_unique(self.pytest_libpaths, Utils.to_list(getattr(tg, 'pytest_libpath', []))) + + if 'py' in tg.features: + # Python dependencies are added to PYTHONPATH + pypath = getattr(tg, 'install_from', tg.path) + + if 'buildcopy' in tg.features: + # Since buildcopy is used we assume that PYTHONPATH in build should be used, + # not source + extend_unique(self.pytest_paths, [pypath.get_bld().abspath()]) + + # Add buildcopy output nodes to dependencies + extend_unique(self.pytest_dep_nodes, [o for task in getattr(tg, 'tasks', []) \ + for o in getattr(task, 'outputs', [])]) + else: + # If buildcopy is not used, depend on sources instead + extend_unique(self.pytest_dep_nodes, tg.source) + extend_unique(self.pytest_paths, [pypath.abspath()]) + + if 'javac' in tg.features: + # If a JAR is generated point to that, otherwise to directory + if getattr(tg, 'jar_task', None): + extend_unique(self.pytest_javapaths, [tg.jar_task.outputs[0].abspath()]) + else: + extend_unique(self.pytest_javapaths, [tg.path.get_bld()]) + + # And add respective dependencies if present + if tg.use_lst: + extend_unique(self.pytest_javapaths, tg.use_lst) + + if getattr(tg, 'link_task', None): + # For tasks with a link_task (C, C++, D et.c.) include their library paths: + if not isinstance(tg.link_task, ccroot.stlink_task): + extend_unique(self.pytest_dep_nodes, tg.link_task.outputs) + extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH) + + if 'pyext' in tg.features: + # If the taskgen is extending Python we also want to add the interpreter libpath. 
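+ # (LIBPATH_PYEXT is normally filled in by waf's python tool at configure time)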
+ extend_unique(self.pytest_libpaths, tg.link_task.env.LIBPATH_PYEXT) + else: + # Only add to libpath if the link task is not a Python extension + extend_unique(self.pytest_libpaths, [tg.link_task.outputs[0].parent.abspath()]) + + +@TaskGen.feature('pytest') +@TaskGen.after_method('pytest_process_use') +def make_pytest(self): + """ + Creates a ``utest`` task with a populated environment for Python if not specified in ``ut_env``: + + - Paths in `pytest_paths` attribute are used to populate PYTHONPATH + - Paths in `pytest_libpaths` attribute are used to populate the system library path (e.g. LD_LIBRARY_PATH) + """ + nodes = self.to_nodes(self.pytest_source) + tsk = self.create_task('utest', nodes) + + tsk.dep_nodes.extend(self.pytest_dep_nodes) + if getattr(self, 'ut_str', None): + self.ut_run, lst = Task.compile_fun(self.ut_str, shell=getattr(self, 'ut_shell', False)) + tsk.vars = lst + tsk.vars + + if getattr(self, 'ut_cwd', None): + if isinstance(self.ut_cwd, str): + # we want a Node instance + if os.path.isabs(self.ut_cwd): + self.ut_cwd = self.bld.root.make_node(self.ut_cwd) + else: + self.ut_cwd = self.path.make_node(self.ut_cwd) + else: + if tsk.inputs: + self.ut_cwd = tsk.inputs[0].parent + else: + raise Errors.WafError("no valid input files for pytest task, check pytest_source value") + + if not self.ut_cwd.exists(): + self.ut_cwd.mkdir() + + if not hasattr(self, 'ut_env'): + self.ut_env = dict(os.environ) + def add_paths(var, lst): + # Add list of paths to a variable, lst can contain strings or nodes + lst = [ str(n) for n in lst ] + Logs.debug("ut: %s: Adding paths %s=%s", self, var, lst) + self.ut_env[var] = os.pathsep.join(lst) + os.pathsep + self.ut_env.get(var, '') + + # Prepend dependency paths to PYTHONPATH, CLASSPATH and LD_LIBRARY_PATH + add_paths('PYTHONPATH', self.pytest_paths) + add_paths('CLASSPATH', self.pytest_javapaths) + + if Utils.is_win32: + add_paths('PATH', self.pytest_libpaths) + elif Utils.unversioned_sys_platform() == 'darwin': + add_paths('DYLD_LIBRARY_PATH', self.pytest_libpaths) + add_paths('LD_LIBRARY_PATH', self.pytest_libpaths) + else: + add_paths('LD_LIBRARY_PATH', self.pytest_libpaths) + diff --git a/third_party/waf/waflib/extras/qnxnto.py b/third_party/waf/waflib/extras/qnxnto.py new file mode 100644 index 0000000..1158124 --- /dev/null +++ b/third_party/waf/waflib/extras/qnxnto.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Jérôme Carretero 2011 (zougloub) +# QNX neutrino compatibility functions + +import sys, os +from waflib import Utils + +class Popen(object): + """ + Popen cannot work on QNX from a threaded program: + Forking in threads is not implemented in neutrino. + + Python's os.popen / spawn / fork won't work when running in threads (they will if in the main program thread) + + In waf, this happens mostly in build. + And the use cases can be replaced by os.system() calls. + """ + __slots__ = ["prog", "kw", "popen", "verbose"] + verbose = 0 + def __init__(self, prog, **kw): + try: + self.prog = prog + self.kw = kw + self.popen = None + if Popen.verbose: + sys.stdout.write("Popen created: %r, kw=%r..." % (prog, kw)) + + do_delegate = kw.get('stdout') == -1 and kw.get('stderr') == -1 + if do_delegate: + if Popen.verbose: + print("Delegating to real Popen") + self.popen = self.real_Popen(prog, **kw) + else: + if Popen.verbose: + print("Emulating") + except Exception as e: + if Popen.verbose: + print("Exception: %s" % e) + raise + + def __getattr__(self, name): + if Popen.verbose: + sys.stdout.write("Getattr: %s..." 
% name) + if name in Popen.__slots__: + return object.__getattribute__(self, name) + else: + if self.popen is not None: + if Popen.verbose: + print("from Popen") + return getattr(self.popen, name) + else: + if name == "wait": + return self.emu_wait + else: + raise Exception("subprocess emulation: not implemented: %s" % name) + + def emu_wait(self): + if Popen.verbose: + print("emulated wait (%r kw=%r)" % (self.prog, self.kw)) + if isinstance(self.prog, str): + cmd = self.prog + else: + cmd = " ".join(self.prog) + if 'cwd' in self.kw: + cmd = 'cd "%s" && %s' % (self.kw['cwd'], cmd) + return os.system(cmd) + +if sys.platform == "qnx6": + Popen.real_Popen = Utils.subprocess.Popen + Utils.subprocess.Popen = Popen + diff --git a/third_party/waf/waflib/extras/qt4.py b/third_party/waf/waflib/extras/qt4.py new file mode 100644 index 0000000..d19a4dd --- /dev/null +++ b/third_party/waf/waflib/extras/qt4.py @@ -0,0 +1,695 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2006-2010 (ita) + +""" + +Tool Description +================ + +This tool helps with finding Qt4 tools and libraries, +and also provides syntactic sugar for using Qt4 tools. + +The following snippet illustrates the tool usage:: + + def options(opt): + opt.load('compiler_cxx qt4') + + def configure(conf): + conf.load('compiler_cxx qt4') + + def build(bld): + bld( + features = 'qt4 cxx cxxprogram', + uselib = 'QTCORE QTGUI QTOPENGL QTSVG', + source = 'main.cpp textures.qrc aboutDialog.ui', + target = 'window', + ) + +Here, the UI description and resource files will be processed +to generate code. + +Usage +===== + +Load the "qt4" tool. + +You also need to edit your sources accordingly: + +- the normal way of doing things is to have your C++ files + include the .moc file. + This is regarded as the best practice (and provides much faster + compilations). + It also implies that the include paths have been set properly. + +- to have the include paths added automatically, use the following:: + + from waflib.TaskGen import feature, before_method, after_method + @feature('cxx') + @after_method('process_source') + @before_method('apply_incpaths') + def add_includes_paths(self): + incs = set(self.to_list(getattr(self, 'includes', ''))) + for x in self.compiled_tasks: + incs.add(x.inputs[0].parent.path_from(self.path)) + self.includes = sorted(incs) + +Note: another tool provides Qt processing that does not require +.moc includes, see 'playground/slow_qt/'. + +A few options (--qt{dir,bin,...}) and environment variables +(QT4_{ROOT,DIR,MOC,UIC,XCOMPILE}) allow finer tuning of the tool, +tool path selection, etc; please read the source for more info.
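+
+As an illustrative sketch only (the ``lang``/``langname`` parameters are
+documented on :py:func:`apply_qt4` below), translations can be wired in
+from the build function as well::
+
+	def build(bld):
+		bld(
+			features = 'qt4 cxx cxxprogram',
+			uselib   = 'QTCORE QTGUI',
+			source   = 'main.cpp res.qrc',
+			target   = 'app',
+			lang     = ['fr', 'de'], # processes fr.ts and de.ts into .qm files
+			langname = 'lang',       # bundles the .qm files into lang.qrc
+		)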
+ +""" + +try: + from xml.sax import make_parser + from xml.sax.handler import ContentHandler +except ImportError: + has_xml = False + ContentHandler = object +else: + has_xml = True + +import os, sys +from waflib.Tools import cxx +from waflib import Task, Utils, Options, Errors, Context +from waflib.TaskGen import feature, after_method, extension +from waflib.Configure import conf +from waflib import Logs + +MOC_H = ['.h', '.hpp', '.hxx', '.hh'] +""" +File extensions associated to the .moc files +""" + +EXT_RCC = ['.qrc'] +""" +File extension for the resource (.qrc) files +""" + +EXT_UI = ['.ui'] +""" +File extension for the user interface (.ui) files +""" + +EXT_QT4 = ['.cpp', '.cc', '.cxx', '.C'] +""" +File extensions of C++ files that may require a .moc processing +""" + +QT4_LIBS = "QtCore QtGui QtUiTools QtNetwork QtOpenGL QtSql QtSvg QtTest QtXml QtXmlPatterns QtWebKit Qt3Support QtHelp QtScript QtDeclarative QtDesigner" + +class qxx(Task.classes['cxx']): + """ + Each C++ file can have zero or several .moc files to create. + They are known only when the files are scanned (preprocessor) + To avoid scanning the c++ files each time (parsing C/C++), the results + are retrieved from the task cache (bld.node_deps/bld.raw_deps). + The moc tasks are also created *dynamically* during the build. + """ + + def __init__(self, *k, **kw): + Task.Task.__init__(self, *k, **kw) + self.moc_done = 0 + + def runnable_status(self): + """ + Compute the task signature to make sure the scanner was executed. Create the + moc tasks by using :py:meth:`waflib.Tools.qt4.qxx.add_moc_tasks` (if necessary), + then postpone the task execution (there is no need to recompute the task signature). + """ + if self.moc_done: + return Task.Task.runnable_status(self) + else: + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + self.add_moc_tasks() + return Task.Task.runnable_status(self) + + def create_moc_task(self, h_node, m_node): + """ + If several libraries use the same classes, it is possible that moc will run several times (Issue 1318) + It is not possible to change the file names, but we can assume that the moc transformation will be identical, + and the moc tasks can be shared in a global cache. + + The defines passed to moc will then depend on task generator order. If this is not acceptable, then + use the tool slow_qt4 instead (and enjoy the slow builds... 
:-( ) + """ + try: + moc_cache = self.generator.bld.moc_cache + except AttributeError: + moc_cache = self.generator.bld.moc_cache = {} + + try: + return moc_cache[h_node] + except KeyError: + tsk = moc_cache[h_node] = Task.classes['moc'](env=self.env, generator=self.generator) + tsk.set_inputs(h_node) + tsk.set_outputs(m_node) + + if self.generator: + self.generator.tasks.append(tsk) + + # direct injection in the build phase (safe because called from the main thread) + gen = self.generator.bld.producer + gen.outstanding.append(tsk) + gen.total += 1 + + return tsk + + def moc_h_ext(self): + ext = [] + try: + ext = Options.options.qt_header_ext.split() + except AttributeError: + pass + if not ext: + ext = MOC_H + return ext + + def add_moc_tasks(self): + """ + Create the moc tasks by looking in ``bld.raw_deps[self.uid()]`` + """ + node = self.inputs[0] + bld = self.generator.bld + + try: + # compute the signature once to know if there is a moc file to create + self.signature() + except KeyError: + # the moc file may be referenced somewhere else + pass + else: + # remove the signature, it must be recomputed with the moc task + delattr(self, 'cache_sig') + + include_nodes = [node.parent] + self.generator.includes_nodes + + moctasks = [] + mocfiles = set() + for d in bld.raw_deps.get(self.uid(), []): + if not d.endswith('.moc'): + continue + + # process that base.moc only once + if d in mocfiles: + continue + mocfiles.add(d) + + # find the source associated with the moc file + h_node = None + + base2 = d[:-4] + for x in include_nodes: + for e in self.moc_h_ext(): + h_node = x.find_node(base2 + e) + if h_node: + break + if h_node: + m_node = h_node.change_ext('.moc') + break + else: + # foo.cpp -> foo.cpp.moc + for k in EXT_QT4: + if base2.endswith(k): + for x in include_nodes: + h_node = x.find_node(base2) + if h_node: + break + if h_node: + m_node = h_node.change_ext(k + '.moc') + break + + if not h_node: + raise Errors.WafError('No source found for %r which is a moc file' % d) + + # create the moc task + task = self.create_moc_task(h_node, m_node) + moctasks.append(task) + + # simple scheduler dependency: run the moc task before others + self.run_after.update(set(moctasks)) + self.moc_done = 1 + +class trans_update(Task.Task): + """Update .ts files from a list of C++ files""" + run_str = '${QT_LUPDATE} ${SRC} -ts ${TGT}' + color = 'BLUE' + +class XMLHandler(ContentHandler): + """ + Parser for *.qrc* files + """ + def __init__(self): + self.buf = [] + self.files = [] + def startElement(self, name, attrs): + if name == 'file': + self.buf = [] + def endElement(self, name): + if name == 'file': + self.files.append(str(''.join(self.buf))) + def characters(self, cars): + self.buf.append(cars) + +@extension(*EXT_RCC) +def create_rcc_task(self, node): + "Create rcc and cxx tasks for *.qrc* files" + rcnode = node.change_ext('_rc.cpp') + self.create_task('rcc', node, rcnode) + cpptask = self.create_task('cxx', rcnode, rcnode.change_ext('.o')) + try: + self.compiled_tasks.append(cpptask) + except AttributeError: + self.compiled_tasks = [cpptask] + return cpptask + +@extension(*EXT_UI) +def create_uic_task(self, node): + "hook for uic tasks" + uictask = self.create_task('ui4', node) + uictask.outputs = [self.path.find_or_declare(self.env['ui_PATTERN'] % node.name[:-3])] + +@extension('.ts') +def add_lang(self, node): + """add all the .ts files into self.lang""" + self.lang = self.to_list(getattr(self, 'lang', [])) + [node] + +@feature('qt4') +@after_method('apply_link') +def apply_qt4(self): + """ + Add
MOC_FLAGS which may be necessary for moc:: + + def build(bld): + bld.program(features='qt4', source='main.cpp', target='app', use='QTCORE') + + The additional parameters are: + + :param lang: list of translation files (\\*.ts) to process + :type lang: list of :py:class:`waflib.Node.Node` or string without the .ts extension + :param update: whether to process the C++ files to update the \\*.ts files (use **waf --translate**) + :type update: bool + :param langname: if given, transform the \\*.ts files into a .qrc file to include in the binary + :type langname: :py:class:`waflib.Node.Node` or string without the .qrc extension + """ + if getattr(self, 'lang', None): + qmtasks = [] + for x in self.to_list(self.lang): + if isinstance(x, str): + x = self.path.find_resource(x + '.ts') + qmtasks.append(self.create_task('ts2qm', x, x.change_ext('.qm'))) + + if getattr(self, 'update', None) and Options.options.trans_qt4: + cxxnodes = [a.inputs[0] for a in self.compiled_tasks] + [ + a.inputs[0] for a in self.tasks if getattr(a, 'inputs', None) and a.inputs[0].name.endswith('.ui')] + for x in qmtasks: + self.create_task('trans_update', cxxnodes, x.inputs) + + if getattr(self, 'langname', None): + qmnodes = [x.outputs[0] for x in qmtasks] + rcnode = self.langname + if isinstance(rcnode, str): + rcnode = self.path.find_or_declare(rcnode + '.qrc') + t = self.create_task('qm2rcc', qmnodes, rcnode) + k = create_rcc_task(self, t.outputs[0]) + self.link_task.inputs.append(k.outputs[0]) + + lst = [] + for flag in self.to_list(self.env['CXXFLAGS']): + if len(flag) < 2: + continue + f = flag[0:2] + if f in ('-D', '-I', '/D', '/I'): + if (f[0] == '/'): + lst.append('-' + flag[1:]) + else: + lst.append(flag) + self.env.append_value('MOC_FLAGS', lst) + +@extension(*EXT_QT4) +def cxx_hook(self, node): + """ + Re-map C++ file extensions to the :py:class:`waflib.Tools.qt4.qxx` task.
+ """ + return self.create_compiled_task('qxx', node) + +class rcc(Task.Task): + """ + Process *.qrc* files + """ + color = 'BLUE' + run_str = '${QT_RCC} -name ${tsk.rcname()} ${SRC[0].abspath()} ${RCC_ST} -o ${TGT}' + ext_out = ['.h'] + + def rcname(self): + return os.path.splitext(self.inputs[0].name)[0] + + def scan(self): + """Parse the *.qrc* files""" + if not has_xml: + Logs.error('no xml support was found, the rcc dependencies will be incomplete!') + return ([], []) + + parser = make_parser() + curHandler = XMLHandler() + parser.setContentHandler(curHandler) + fi = open(self.inputs[0].abspath(), 'r') + try: + parser.parse(fi) + finally: + fi.close() + + nodes = [] + names = [] + root = self.inputs[0].parent + for x in curHandler.files: + nd = root.find_resource(x) + if nd: + nodes.append(nd) + else: + names.append(x) + return (nodes, names) + +class moc(Task.Task): + """ + Create *.moc* files + """ + color = 'BLUE' + run_str = '${QT_MOC} ${MOC_FLAGS} ${MOCCPPPATH_ST:INCPATHS} ${MOCDEFINES_ST:DEFINES} ${SRC} ${MOC_ST} ${TGT}' + def keyword(self): + return "Creating" + def __str__(self): + return self.outputs[0].path_from(self.generator.bld.launch_node()) + +class ui4(Task.Task): + """ + Process *.ui* files + """ + color = 'BLUE' + run_str = '${QT_UIC} ${SRC} -o ${TGT}' + ext_out = ['.h'] + +class ts2qm(Task.Task): + """ + Create *.qm* files from *.ts* files + """ + color = 'BLUE' + run_str = '${QT_LRELEASE} ${QT_LRELEASE_FLAGS} ${SRC} -qm ${TGT}' + +class qm2rcc(Task.Task): + """ + Transform *.qm* files into *.rc* files + """ + color = 'BLUE' + after = 'ts2qm' + + def run(self): + """Create a qrc file including the inputs""" + txt = '\n'.join(['<file>%s</file>' % k.path_from(self.outputs[0].parent) for k in self.inputs]) + code = '<!DOCTYPE RCC><RCC version="1.0">\n<qresource>\n%s\n</qresource>\n</RCC>' % txt + self.outputs[0].write(code) + +def configure(self): + """ + Besides the configuration options, the environment variable QT4_ROOT may be used + to give the location of the qt4 libraries (absolute path). 
+ + The detection will use the program *pkg-config* through :py:func:`waflib.Tools.config_c.check_cfg` + """ + self.find_qt4_binaries() + self.set_qt4_libs_to_check() + self.set_qt4_defines() + self.find_qt4_libraries() + self.add_qt4_rpath() + self.simplify_qt4_libs() + +@conf +def find_qt4_binaries(self): + env = self.env + opt = Options.options + + qtdir = getattr(opt, 'qtdir', '') + qtbin = getattr(opt, 'qtbin', '') + + paths = [] + + if qtdir: + qtbin = os.path.join(qtdir, 'bin') + + # the qt directory has been given from QT4_ROOT - deduce the qt binary path + if not qtdir: + qtdir = os.environ.get('QT4_ROOT', '') + qtbin = os.environ.get('QT4_BIN') or os.path.join(qtdir, 'bin') + + if qtbin: + paths = [qtbin] + + # no qtdir, look in the path and in /usr/local/Trolltech + if not qtdir: + paths = os.environ.get('PATH', '').split(os.pathsep) + paths.append('/usr/share/qt4/bin/') + try: + lst = Utils.listdir('/usr/local/Trolltech/') + except OSError: + pass + else: + if lst: + lst.sort() + lst.reverse() + + # keep the highest version + qtdir = '/usr/local/Trolltech/%s/' % lst[0] + qtbin = os.path.join(qtdir, 'bin') + paths.append(qtbin) + + # at the end, try to find qmake in the paths given + # keep the one with the highest version + cand = None + prev_ver = ['4', '0', '0'] + for qmk in ('qmake-qt4', 'qmake4', 'qmake'): + try: + qmake = self.find_program(qmk, path_list=paths) + except self.errors.ConfigurationError: + pass + else: + try: + version = self.cmd_and_log(qmake + ['-query', 'QT_VERSION']).strip() + except self.errors.WafError: + pass + else: + if version: + new_ver = version.split('.') + if new_ver > prev_ver: + cand = qmake + prev_ver = new_ver + if cand: + self.env.QMAKE = cand + else: + self.fatal('Could not find qmake for qt4') + + qtbin = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_BINS']).strip() + os.sep + + def find_bin(lst, var): + if var in env: + return + for f in lst: + try: + ret = self.find_program(f, path_list=paths) + except self.errors.ConfigurationError: + pass + else: + env[var]=ret + break + + find_bin(['uic-qt3', 'uic3'], 'QT_UIC3') + find_bin(['uic-qt4', 'uic'], 'QT_UIC') + if not env.QT_UIC: + self.fatal('cannot find the uic compiler for qt4') + + self.start_msg('Checking for uic version') + uicver = self.cmd_and_log(env.QT_UIC + ["-version"], output=Context.BOTH) + uicver = ''.join(uicver).strip() + uicver = uicver.replace('Qt User Interface Compiler ','').replace('User Interface Compiler for Qt', '') + self.end_msg(uicver) + if uicver.find(' 3.') != -1: + self.fatal('this uic compiler is for qt3, add uic for qt4 to your path') + + find_bin(['moc-qt4', 'moc'], 'QT_MOC') + find_bin(['rcc-qt4', 'rcc'], 'QT_RCC') + find_bin(['lrelease-qt4', 'lrelease'], 'QT_LRELEASE') + find_bin(['lupdate-qt4', 'lupdate'], 'QT_LUPDATE') + + env['UIC3_ST']= '%s -o %s' + env['UIC_ST'] = '%s -o %s' + env['MOC_ST'] = '-o' + env['ui_PATTERN'] = 'ui_%s.h' + env['QT_LRELEASE_FLAGS'] = ['-silent'] + env.MOCCPPPATH_ST = '-I%s' + env.MOCDEFINES_ST = '-D%s' + +@conf +def find_qt4_libraries(self): + qtlibs = getattr(Options.options, 'qtlibs', None) or os.environ.get("QT4_LIBDIR") + if not qtlibs: + try: + qtlibs = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_LIBS']).strip() + except Errors.WafError: + qtdir = self.cmd_and_log(self.env.QMAKE + ['-query', 'QT_INSTALL_PREFIX']).strip() + os.sep + qtlibs = os.path.join(qtdir, 'lib') + self.msg('Found the Qt4 libraries in', qtlibs) + + qtincludes = os.environ.get("QT4_INCLUDES") or self.cmd_and_log(self.env.QMAKE + 
['-query', 'QT_INSTALL_HEADERS']).strip() + env = self.env + if not 'PKG_CONFIG_PATH' in os.environ: + os.environ['PKG_CONFIG_PATH'] = '%s:%s/pkgconfig:/usr/lib/qt4/lib/pkgconfig:/opt/qt4/lib/pkgconfig:/usr/lib/qt4/lib:/opt/qt4/lib' % (qtlibs, qtlibs) + + try: + if os.environ.get("QT4_XCOMPILE"): + raise self.errors.ConfigurationError() + self.check_cfg(atleast_pkgconfig_version='0.1') + except self.errors.ConfigurationError: + for i in self.qt4_vars: + uselib = i.upper() + if Utils.unversioned_sys_platform() == "darwin": + # Since at least qt 4.7.3 each library locates in separate directory + frameworkName = i + ".framework" + qtDynamicLib = os.path.join(qtlibs, frameworkName, i) + if os.path.exists(qtDynamicLib): + env.append_unique('FRAMEWORK_' + uselib, i) + self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') + else: + self.msg('Checking for %s' % i, False, 'YELLOW') + env.append_unique('INCLUDES_' + uselib, os.path.join(qtlibs, frameworkName, 'Headers')) + elif env.DEST_OS != "win32": + qtDynamicLib = os.path.join(qtlibs, "lib" + i + ".so") + qtStaticLib = os.path.join(qtlibs, "lib" + i + ".a") + if os.path.exists(qtDynamicLib): + env.append_unique('LIB_' + uselib, i) + self.msg('Checking for %s' % i, qtDynamicLib, 'GREEN') + elif os.path.exists(qtStaticLib): + env.append_unique('LIB_' + uselib, i) + self.msg('Checking for %s' % i, qtStaticLib, 'GREEN') + else: + self.msg('Checking for %s' % i, False, 'YELLOW') + + env.append_unique('LIBPATH_' + uselib, qtlibs) + env.append_unique('INCLUDES_' + uselib, qtincludes) + env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) + else: + # Release library names are like QtCore4 + for k in ("lib%s.a", "lib%s4.a", "%s.lib", "%s4.lib"): + lib = os.path.join(qtlibs, k % i) + if os.path.exists(lib): + env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')]) + self.msg('Checking for %s' % i, lib, 'GREEN') + break + else: + self.msg('Checking for %s' % i, False, 'YELLOW') + + env.append_unique('LIBPATH_' + uselib, qtlibs) + env.append_unique('INCLUDES_' + uselib, qtincludes) + env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) + + # Debug library names are like QtCore4d + uselib = i.upper() + "_debug" + for k in ("lib%sd.a", "lib%sd4.a", "%sd.lib", "%sd4.lib"): + lib = os.path.join(qtlibs, k % i) + if os.path.exists(lib): + env.append_unique('LIB_' + uselib, i + k[k.find("%s") + 2 : k.find('.')]) + self.msg('Checking for %s' % i, lib, 'GREEN') + break + else: + self.msg('Checking for %s' % i, False, 'YELLOW') + + env.append_unique('LIBPATH_' + uselib, qtlibs) + env.append_unique('INCLUDES_' + uselib, qtincludes) + env.append_unique('INCLUDES_' + uselib, os.path.join(qtincludes, i)) + else: + for i in self.qt4_vars_debug + self.qt4_vars: + self.check_cfg(package=i, args='--cflags --libs', mandatory=False) + +@conf +def simplify_qt4_libs(self): + # the libpaths make really long command-lines + # remove the qtcore ones from qtgui, etc + env = self.env + def process_lib(vars_, coreval): + for d in vars_: + var = d.upper() + if var == 'QTCORE': + continue + + value = env['LIBPATH_'+var] + if value: + core = env[coreval] + accu = [] + for lib in value: + if lib in core: + continue + accu.append(lib) + env['LIBPATH_'+var] = accu + + process_lib(self.qt4_vars, 'LIBPATH_QTCORE') + process_lib(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG') + +@conf +def add_qt4_rpath(self): + # rpath if wanted + env = self.env + if getattr(Options.options, 'want_rpath', False): + def process_rpath(vars_, coreval): + for d in 
vars_: + var = d.upper() + value = env['LIBPATH_'+var] + if value: + core = env[coreval] + accu = [] + for lib in value: + if var != 'QTCORE': + if lib in core: + continue + accu.append('-Wl,--rpath='+lib) + env['RPATH_'+var] = accu + process_rpath(self.qt4_vars, 'LIBPATH_QTCORE') + process_rpath(self.qt4_vars_debug, 'LIBPATH_QTCORE_DEBUG') + +@conf +def set_qt4_libs_to_check(self): + if not hasattr(self, 'qt4_vars'): + self.qt4_vars = QT4_LIBS + self.qt4_vars = Utils.to_list(self.qt4_vars) + if not hasattr(self, 'qt4_vars_debug'): + self.qt4_vars_debug = [a + '_debug' for a in self.qt4_vars] + self.qt4_vars_debug = Utils.to_list(self.qt4_vars_debug) + +@conf +def set_qt4_defines(self): + if sys.platform != 'win32': + return + for x in self.qt4_vars: + y = x[2:].upper() + self.env.append_unique('DEFINES_%s' % x.upper(), 'QT_%s_LIB' % y) + self.env.append_unique('DEFINES_%s_DEBUG' % x.upper(), 'QT_%s_LIB' % y) + +def options(opt): + """ + Command-line options + """ + opt.add_option('--want-rpath', action='store_true', default=False, dest='want_rpath', help='enable the rpath for qt libraries') + + opt.add_option('--header-ext', + type='string', + default='', + help='header extension for moc files', + dest='qt_header_ext') + + for i in 'qtdir qtbin qtlibs'.split(): + opt.add_option('--'+i, type='string', default='', dest=i) + + opt.add_option('--translate', action="store_true", help="collect translation strings", dest="trans_qt4", default=False) + diff --git a/third_party/waf/waflib/extras/relocation.py b/third_party/waf/waflib/extras/relocation.py new file mode 100644 index 0000000..7e821f4 --- /dev/null +++ b/third_party/waf/waflib/extras/relocation.py @@ -0,0 +1,85 @@ +#! /usr/bin/env python +# encoding: utf-8 + +""" +Waf 1.6 + +Try to detect if the project directory was relocated, and if it was, +change the node representing the project directory. Just call: + + waf configure build + +Note that if the project directory name changes, the signatures for the tasks using +files in that directory will change, causing a partial build. 
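+
+For instance, a sequence this tool is meant to survive looks as follows
+(directory names are illustrative)::
+
+	$ waf configure build
+	$ cd .. && mv project project2 && cd project2
+	$ waf build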
+""" + +import os +from waflib import Build, ConfigSet, Task, Utils, Errors +from waflib.TaskGen import feature, after_method + +EXTRA_LOCK = '.old_srcdir' + +old1 = Build.BuildContext.store +def store(self): + old1(self) + db = os.path.join(self.variant_dir, EXTRA_LOCK) + env = ConfigSet.ConfigSet() + env.SRCDIR = self.srcnode.abspath() + env.store(db) +Build.BuildContext.store = store + +old2 = Build.BuildContext.init_dirs +def init_dirs(self): + + if not (os.path.isabs(self.top_dir) and os.path.isabs(self.out_dir)): + raise Errors.WafError('The project was not configured: run "waf configure" first!') + + srcdir = None + db = os.path.join(self.variant_dir, EXTRA_LOCK) + env = ConfigSet.ConfigSet() + try: + env.load(db) + srcdir = env.SRCDIR + except: + pass + + if srcdir: + d = self.root.find_node(srcdir) + if d and srcdir != self.top_dir and getattr(d, 'children', ''): + srcnode = self.root.make_node(self.top_dir) + print("relocating the source directory %r -> %r" % (srcdir, self.top_dir)) + srcnode.children = {} + + for (k, v) in d.children.items(): + srcnode.children[k] = v + v.parent = srcnode + d.children = {} + + old2(self) + +Build.BuildContext.init_dirs = init_dirs + + +def uid(self): + try: + return self.uid_ + except AttributeError: + # this is not a real hot zone, but we want to avoid surprises here + m = Utils.md5() + up = m.update + up(self.__class__.__name__.encode()) + for x in self.inputs + self.outputs: + up(x.path_from(x.ctx.srcnode).encode()) + self.uid_ = m.digest() + return self.uid_ +Task.Task.uid = uid + +@feature('c', 'cxx', 'd', 'go', 'asm', 'fc', 'includes') +@after_method('propagate_uselib_vars', 'process_source') +def apply_incpaths(self): + lst = self.to_incnodes(self.to_list(getattr(self, 'includes', [])) + self.env['INCLUDES']) + self.includes_nodes = lst + bld = self.bld + self.env['INCPATHS'] = [x.is_child_of(bld.srcnode) and x.path_from(bld.bldnode) or x.abspath() for x in lst] + + diff --git a/third_party/waf/waflib/extras/remote.py b/third_party/waf/waflib/extras/remote.py new file mode 100644 index 0000000..f43b600 --- /dev/null +++ b/third_party/waf/waflib/extras/remote.py @@ -0,0 +1,327 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Remote Builds tool using rsync+ssh + +__author__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2013" + +""" +Simple Remote Builds +******************** + +This tool is an *experimental* tool (meaning, do not even try to pollute +the waf bug tracker with bugs in here, contact me directly) providing simple +remote builds. + +It uses rsync and ssh to perform the remote builds. +It is intended for performing cross-compilation on platforms where +a cross-compiler is either unavailable (eg. MacOS, QNX) a specific product +does not exist (eg. Windows builds using Visual Studio) or simply not installed. +This tool sends the sources and the waf script to the remote host, +and commands the usual waf execution. + +There are alternatives to using this tool, such as setting up shared folders, +logging on to remote machines, and building on the shared folders. +Electing one method or another depends on the size of the program. + + +Usage +===== + +1. Set your wscript file so it includes a list of variants, + e.g.:: + + from waflib import Utils + top = '.' 
+ out = 'build' + + variants = [ + 'linux_64_debug', + 'linux_64_release', + 'linux_32_debug', + 'linux_32_release', + ] + + from waflib.extras import remote + + def options(opt): + # normal stuff from here on + opt.load('compiler_c') + + def configure(conf): + if not conf.variant: + return + # normal stuff from here on + conf.load('compiler_c') + + def build(bld): + if not bld.variant: + return + # normal stuff from here on + bld(features='c cprogram', target='app', source='main.c') + + +2. Build the waf file, so it includes this tool, and put it in the current + directory + + .. code:: bash + + ./waf-light --tools=remote + +3. Set the host names to access the hosts: + + .. code:: bash + + export REMOTE_QNX=user@kiunix + +4. Setup the ssh server and ssh keys + + The ssh key should not be protected by a password, or it will prompt for it every time. + Create the key on the client: + + .. code:: bash + + ssh-keygen -t rsa -f foo.rsa + + Then copy foo.rsa.pub to the remote machine (user@kiunix:/home/user/.ssh/authorized_keys), + and make sure the permissions are correct (chmod go-w ~ ~/.ssh ~/.ssh/authorized_keys) + + A separate key for the build processes can be set in the environment variable WAF_SSH_KEY. + The tool will then use 'ssh-keyscan' to avoid prompting for remote hosts, so + be warned to use this feature on internal networks only (MITM). + + .. code:: bash + + export WAF_SSH_KEY=~/foo.rsa + +5. Perform the build: + + .. code:: bash + + waf configure_all build_all --remote + +""" + + +import getpass, os, re, sys +from collections import OrderedDict +from waflib import Context, Options, Utils, ConfigSet + +from waflib.Build import BuildContext, CleanContext, InstallContext, UninstallContext +from waflib.Configure import ConfigurationContext + + +is_remote = False +if '--remote' in sys.argv: + is_remote = True + sys.argv.remove('--remote') + +class init(Context.Context): + """ + Generates the *_all commands + """ + cmd = 'init' + fun = 'init' + def execute(self): + for x in list(Context.g_module.variants): + self.make_variant(x) + lst = ['remote'] + for k in Options.commands: + if k.endswith('_all'): + name = k.replace('_all', '') + for x in Context.g_module.variants: + lst.append('%s_%s' % (name, x)) + else: + lst.append(k) + del Options.commands[:] + Options.commands += lst + + def make_variant(self, x): + for y in (BuildContext, CleanContext, InstallContext, UninstallContext): + name = y.__name__.replace('Context','').lower() + class tmp(y): + cmd = name + '_' + x + fun = 'build' + variant = x + class tmp(ConfigurationContext): + cmd = 'configure_' + x + fun = 'configure' + variant = x + def __init__(self, **kw): + ConfigurationContext.__init__(self, **kw) + self.setenv(x) + +class remote(BuildContext): + cmd = 'remote' + fun = 'build' + + def get_ssh_hosts(self): + lst = [] + for v in Context.g_module.variants: + self.env.HOST = self.login_to_host(self.variant_to_login(v)) + cmd = Utils.subst_vars('${SSH_KEYSCAN} -t rsa,ecdsa ${HOST}', self.env) + out, err = self.cmd_and_log(cmd, output=Context.BOTH, quiet=Context.BOTH) + lst.append(out.strip()) + return lst + + def setup_private_ssh_key(self): + """ + When WAF_SSH_KEY points to a private key, a .ssh directory will be created in the build directory + Make sure that the ssh key does not prompt for a password + """ + key = os.environ.get('WAF_SSH_KEY', '') + if not key: + return + if not os.path.isfile(key): + self.fatal('Key in WAF_SSH_KEY must point to a valid file') + self.ssh_dir = os.path.join(self.path.abspath(), 'build', 
'.ssh') + self.ssh_hosts = os.path.join(self.ssh_dir, 'known_hosts') + self.ssh_key = os.path.join(self.ssh_dir, os.path.basename(key)) + self.ssh_config = os.path.join(self.ssh_dir, 'config') + for x in self.ssh_hosts, self.ssh_key, self.ssh_config: + if not os.path.isfile(x): + if not os.path.isdir(self.ssh_dir): + os.makedirs(self.ssh_dir) + Utils.writef(self.ssh_key, Utils.readf(key), 'wb') + os.chmod(self.ssh_key, 448) + + Utils.writef(self.ssh_hosts, '\n'.join(self.get_ssh_hosts())) + os.chmod(self.ssh_key, 448) + + Utils.writef(self.ssh_config, 'UserKnownHostsFile %s' % self.ssh_hosts, 'wb') + os.chmod(self.ssh_config, 448) + self.env.SSH_OPTS = ['-F', self.ssh_config, '-i', self.ssh_key] + self.env.append_value('RSYNC_SEND_OPTS', '--exclude=build/.ssh') + + def skip_unbuildable_variant(self): + # skip variants that cannot be built on this OS + for k in Options.commands: + a, _, b = k.partition('_') + if b in Context.g_module.variants: + c, _, _ = b.partition('_') + if c != Utils.unversioned_sys_platform(): + Options.commands.remove(k) + + def login_to_host(self, login): + return re.sub(r'(\w+@)', '', login) + + def variant_to_login(self, variant): + """linux_32_debug -> search env.LINUX_32 and then env.LINUX""" + x = variant[:variant.rfind('_')] + ret = os.environ.get('REMOTE_' + x.upper(), '') + if not ret: + x = x[:x.find('_')] + ret = os.environ.get('REMOTE_' + x.upper(), '') + if not ret: + ret = '%s@localhost' % getpass.getuser() + return ret + + def execute(self): + global is_remote + if not is_remote: + self.skip_unbuildable_variant() + else: + BuildContext.execute(self) + + def restore(self): + self.top_dir = os.path.abspath(Context.g_module.top) + self.srcnode = self.root.find_node(self.top_dir) + self.path = self.srcnode + + self.out_dir = os.path.join(self.top_dir, Context.g_module.out) + self.bldnode = self.root.make_node(self.out_dir) + self.bldnode.mkdir() + + self.env = ConfigSet.ConfigSet() + + def extract_groups_of_builds(self): + """Return a dict mapping each variants to the commands to build""" + self.vgroups = {} + for x in reversed(Options.commands): + _, _, variant = x.partition('_') + if variant in Context.g_module.variants: + try: + dct = self.vgroups[variant] + except KeyError: + dct = self.vgroups[variant] = OrderedDict() + try: + dct[variant].append(x) + except KeyError: + dct[variant] = [x] + Options.commands.remove(x) + + def custom_options(self, login): + try: + return Context.g_module.host_options[login] + except (AttributeError, KeyError): + return {} + + def recurse(self, *k, **kw): + self.env.RSYNC = getattr(Context.g_module, 'rsync', 'rsync -a --chmod=u+rwx') + self.env.SSH = getattr(Context.g_module, 'ssh', 'ssh') + self.env.SSH_KEYSCAN = getattr(Context.g_module, 'ssh_keyscan', 'ssh-keyscan') + try: + self.env.WAF = getattr(Context.g_module, 'waf') + except AttributeError: + try: + os.stat('waf') + except KeyError: + self.fatal('Put a waf file in the directory (./waf-light --tools=remote)') + else: + self.env.WAF = './waf' + + self.extract_groups_of_builds() + self.setup_private_ssh_key() + for k, v in self.vgroups.items(): + task = self(rule=rsync_and_ssh, always=True) + task.env.login = self.variant_to_login(k) + + task.env.commands = [] + for opt, value in v.items(): + task.env.commands += value + task.env.variant = task.env.commands[0].partition('_')[2] + for opt, value in self.custom_options(k): + task.env[opt] = value + self.jobs = len(self.vgroups) + + def make_mkdir_command(self, task): + return Utils.subst_vars('${SSH} ${SSH_OPTS} 
${login} "rm -fr ${remote_dir} && mkdir -p ${remote_dir}"', task.env) + + def make_send_command(self, task): + return Utils.subst_vars('${RSYNC} ${RSYNC_SEND_OPTS} -e "${SSH} ${SSH_OPTS}" ${local_dir} ${login}:${remote_dir}', task.env) + + def make_exec_command(self, task): + txt = '''${SSH} ${SSH_OPTS} ${login} "cd ${remote_dir} && ${WAF} ${commands}"''' + return Utils.subst_vars(txt, task.env) + + def make_save_command(self, task): + return Utils.subst_vars('${RSYNC} ${RSYNC_SAVE_OPTS} -e "${SSH} ${SSH_OPTS}" ${login}:${remote_dir_variant} ${build_dir}', task.env) + +def rsync_and_ssh(task): + + # remove a warning + task.uid_ = id(task) + + bld = task.generator.bld + + task.env.user, _, _ = task.env.login.partition('@') + task.env.hdir = Utils.to_hex(Utils.h_list((task.generator.path.abspath(), task.env.variant))) + task.env.remote_dir = '~%s/wafremote/%s' % (task.env.user, task.env.hdir) + task.env.local_dir = bld.srcnode.abspath() + '/' + + task.env.remote_dir_variant = '%s/%s/%s' % (task.env.remote_dir, Context.g_module.out, task.env.variant) + task.env.build_dir = bld.bldnode.abspath() + + ret = task.exec_command(bld.make_mkdir_command(task)) + if ret: + return ret + ret = task.exec_command(bld.make_send_command(task)) + if ret: + return ret + ret = task.exec_command(bld.make_exec_command(task)) + if ret: + return ret + ret = task.exec_command(bld.make_save_command(task)) + if ret: + return ret + diff --git a/third_party/waf/waflib/extras/resx.py b/third_party/waf/waflib/extras/resx.py new file mode 100644 index 0000000..caf4d31 --- /dev/null +++ b/third_party/waf/waflib/extras/resx.py @@ -0,0 +1,35 @@ +#! /usr/bin/env python +# encoding: utf-8 + +import os +from waflib import Task +from waflib.TaskGen import extension + +def configure(conf): + conf.find_program(['resgen'], var='RESGEN') + conf.env.RESGENFLAGS = '/useSourcePath' + +@extension('.resx') +def resx_file(self, node): + """ + Bind the .resx extension to a resgen task + """ + if not getattr(self, 'cs_task', None): + self.bld.fatal('resx_file has no link task for use %r' % self) + + # Given assembly 'Foo' and file 'Sub/Dir/File.resx', create 'Foo.Sub.Dir.File.resources' + assembly = getattr(self, 'namespace', os.path.splitext(self.gen)[0]) + res = os.path.splitext(node.path_from(self.path))[0].replace('/', '.').replace('\\', '.') + out = self.path.find_or_declare(assembly + '.' + res + '.resources') + + tsk = self.create_task('resgen', node, out) + + self.cs_task.dep_nodes.extend(tsk.outputs) # dependency + self.env.append_value('RESOURCES', tsk.outputs[0].bldpath()) + +class resgen(Task.Task): + """ + Compile C# resource files + """ + color = 'YELLOW' + run_str = '${RESGEN} ${RESGENFLAGS} ${SRC} ${TGT}' diff --git a/third_party/waf/waflib/extras/review.py b/third_party/waf/waflib/extras/review.py new file mode 100644 index 0000000..561e062 --- /dev/null +++ b/third_party/waf/waflib/extras/review.py @@ -0,0 +1,325 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Laurent Birtz, 2011 +# moved the code into a separate tool (ita) + +""" +There are several things here: +- a different command-line option management making options persistent +- the review command to display the options set + +Assumptions: +- configuration options are not always added to the right group (and do not count on the users to do it...) 
+- the options are persistent between the executions (waf options are NOT persistent by design), even for the configuration +- when the options change, the build is invalidated (forcing a reconfiguration) +""" + +import os, textwrap, shutil +from waflib import Logs, Context, ConfigSet, Options, Build, Configure + +class Odict(dict): + """Ordered dictionary""" + def __init__(self, data=None): + self._keys = [] + dict.__init__(self) + if data: + # we were provided a regular dict + if isinstance(data, dict): + self.append_from_dict(data) + + # we were provided a tuple list + elif type(data) == list: + self.append_from_plist(data) + + # we were provided invalid input + else: + raise Exception("expected a dict or a tuple list") + + def append_from_dict(self, dict): + map(self.__setitem__, dict.keys(), dict.values()) + + def append_from_plist(self, plist): + for pair in plist: + if len(pair) != 2: + raise Exception("invalid pairs list") + for (k, v) in plist: + self.__setitem__(k, v) + + def __delitem__(self, key): + if not key in self._keys: + raise KeyError(key) + dict.__delitem__(self, key) + self._keys.remove(key) + + def __setitem__(self, key, item): + dict.__setitem__(self, key, item) + if key not in self._keys: + self._keys.append(key) + + def clear(self): + dict.clear(self) + self._keys = [] + + def copy(self): + return Odict(self.plist()) + + def items(self): + return zip(self._keys, self.values()) + + def keys(self): + return list(self._keys) # return a copy of the list + + def values(self): + return map(self.get, self._keys) + + def plist(self): + p = [] + for k, v in self.items(): + p.append( (k, v) ) + return p + + def __str__(self): + buf = [] + buf.append("{ ") + for k, v in self.items(): + buf.append('%r : %r, ' % (k, v)) + buf.append("}") + return ''.join(buf) + +review_options = Odict() +""" +Ordered dictionary mapping configuration option names to their optparse option. +""" + +review_defaults = {} +""" +Dictionary mapping configuration option names to their default value. +""" + +old_review_set = None +""" +Review set containing the configuration values before parsing the command line. +""" + +new_review_set = None +""" +Review set containing the configuration values after parsing the command line. +""" + +class OptionsReview(Options.OptionsContext): + def __init__(self, **kw): + super(self.__class__, self).__init__(**kw) + + def prepare_config_review(self): + """ + Find the configuration options that are reviewable, detach + their default value from their optparse object and store them + into the review dictionaries. 
+ """ + gr = self.get_option_group('configure options') + for opt in gr.option_list: + if opt.action != 'store' or opt.dest in ("out", "top"): + continue + review_options[opt.dest] = opt + review_defaults[opt.dest] = opt.default + if gr.defaults.has_key(opt.dest): + del gr.defaults[opt.dest] + opt.default = None + + def parse_args(self): + self.prepare_config_review() + self.parser.get_option('--prefix').help = 'installation prefix' + super(OptionsReview, self).parse_args() + Context.create_context('review').refresh_review_set() + +class ReviewContext(Context.Context): + '''reviews the configuration values''' + + cmd = 'review' + + def __init__(self, **kw): + super(self.__class__, self).__init__(**kw) + + out = Options.options.out + if not out: + out = getattr(Context.g_module, Context.OUT, None) + if not out: + out = Options.lockfile.replace('.lock-waf', '') + self.build_path = (os.path.isabs(out) and self.root or self.path).make_node(out).abspath() + """Path to the build directory""" + + self.cache_path = os.path.join(self.build_path, Build.CACHE_DIR) + """Path to the cache directory""" + + self.review_path = os.path.join(self.cache_path, 'review.cache') + """Path to the review cache file""" + + def execute(self): + """ + Display and store the review set. Invalidate the cache as required. + """ + if not self.compare_review_set(old_review_set, new_review_set): + self.invalidate_cache() + self.store_review_set(new_review_set) + print(self.display_review_set(new_review_set)) + + def invalidate_cache(self): + """Invalidate the cache to prevent bad builds.""" + try: + Logs.warn("Removing the cached configuration since the options have changed") + shutil.rmtree(self.cache_path) + except: + pass + + def refresh_review_set(self): + """ + Obtain the old review set and the new review set, and import the new set. + """ + global old_review_set, new_review_set + old_review_set = self.load_review_set() + new_review_set = self.update_review_set(old_review_set) + self.import_review_set(new_review_set) + + def load_review_set(self): + """ + Load and return the review set from the cache if it exists. + Otherwise, return an empty set. + """ + if os.path.isfile(self.review_path): + return ConfigSet.ConfigSet(self.review_path) + return ConfigSet.ConfigSet() + + def store_review_set(self, review_set): + """ + Store the review set specified in the cache. + """ + if not os.path.isdir(self.cache_path): + os.makedirs(self.cache_path) + review_set.store(self.review_path) + + def update_review_set(self, old_set): + """ + Merge the options passed on the command line with those imported + from the previous review set and return the corresponding + preview set. + """ + + # Convert value to string. It's important that 'None' maps to + # the empty string. + def val_to_str(val): + if val == None or val == '': + return '' + return str(val) + + new_set = ConfigSet.ConfigSet() + opt_dict = Options.options.__dict__ + + for name in review_options.keys(): + # the option is specified explicitly on the command line + if name in opt_dict: + # if the option is the default, pretend it was never specified + if val_to_str(opt_dict[name]) != val_to_str(review_defaults[name]): + new_set[name] = opt_dict[name] + # the option was explicitly specified in a previous command + elif name in old_set: + new_set[name] = old_set[name] + + return new_set + + def import_review_set(self, review_set): + """ + Import the actual value of the reviewable options in the option + dictionary, given the current review set. 
+ """ + for name in review_options.keys(): + if name in review_set: + value = review_set[name] + else: + value = review_defaults[name] + setattr(Options.options, name, value) + + def compare_review_set(self, set1, set2): + """ + Return true if the review sets specified are equal. + """ + if len(set1.keys()) != len(set2.keys()): + return False + for key in set1.keys(): + if not key in set2 or set1[key] != set2[key]: + return False + return True + + def display_review_set(self, review_set): + """ + Return the string representing the review set specified. + """ + term_width = Logs.get_term_cols() + lines = [] + for dest in review_options.keys(): + opt = review_options[dest] + name = ", ".join(opt._short_opts + opt._long_opts) + help = opt.help + actual = None + if dest in review_set: + actual = review_set[dest] + default = review_defaults[dest] + lines.append(self.format_option(name, help, actual, default, term_width)) + return "Configuration:\n\n" + "\n\n".join(lines) + "\n" + + def format_option(self, name, help, actual, default, term_width): + """ + Return the string representing the option specified. + """ + def val_to_str(val): + if val == None or val == '': + return "(void)" + return str(val) + + max_name_len = 20 + sep_len = 2 + + w = textwrap.TextWrapper() + w.width = term_width - 1 + if w.width < 60: + w.width = 60 + + out = "" + + # format the help + out += w.fill(help) + "\n" + + # format the name + name_len = len(name) + out += Logs.colors.CYAN + name + Logs.colors.NORMAL + + # set the indentation used when the value wraps to the next line + w.subsequent_indent = " ".rjust(max_name_len + sep_len) + w.width -= (max_name_len + sep_len) + + # the name string is too long, switch to the next line + if name_len > max_name_len: + out += "\n" + w.subsequent_indent + + # fill the remaining of the line with spaces + else: + out += " ".rjust(max_name_len + sep_len - name_len) + + # format the actual value, if there is one + if actual != None: + out += Logs.colors.BOLD + w.fill(val_to_str(actual)) + Logs.colors.NORMAL + "\n" + w.subsequent_indent + + # format the default value + default_fmt = val_to_str(default) + if actual != None: + default_fmt = "default: " + default_fmt + out += Logs.colors.NORMAL + w.fill(default_fmt) + Logs.colors.NORMAL + + return out + +# Monkey-patch ConfigurationContext.execute() to have it store the review set. +old_configure_execute = Configure.ConfigurationContext.execute +def new_configure_execute(self): + old_configure_execute(self) + Context.create_context('review').store_review_set(new_review_set) +Configure.ConfigurationContext.execute = new_configure_execute + diff --git a/third_party/waf/waflib/extras/rst.py b/third_party/waf/waflib/extras/rst.py new file mode 100644 index 0000000..f3c3a5e --- /dev/null +++ b/third_party/waf/waflib/extras/rst.py @@ -0,0 +1,260 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Jérôme Carretero, 2013 (zougloub) + +""" +reStructuredText support (experimental) + +Example:: + + def configure(conf): + conf.load('rst') + if not conf.env.RST2HTML: + conf.fatal('The program rst2html is required') + + def build(bld): + bld( + features = 'rst', + type = 'rst2html', # rst2html, rst2pdf, ... + source = 'index.rst', # mandatory, the source + deps = 'image.png', # to give additional non-trivial dependencies + ) + +By default the tool looks for a set of programs in PATH. +The tools are defined in `rst_progs`. 
+To configure with a special program use:: + + $ RST2HTML=/path/to/rst2html waf configure + +This tool is experimental; don't hesitate to contribute to it. + +""" + +import re +from waflib import Node, Utils, Task, Errors, Logs +from waflib.TaskGen import feature, before_method + +rst_progs = "rst2html rst2xetex rst2latex rst2xml rst2pdf rst2s5 rst2man rst2odt rst2rtf".split() + +def parse_rst_node(task, node, nodes, names, seen, dirs=None): + # TODO add extensibility, to handle custom rst include tags... + if dirs is None: + dirs = (node.parent,node.get_bld().parent) + + if node in seen: + return + seen.append(node) + code = node.read() + re_rst = re.compile(r'^\s*.. ((?P<subst>\|\S+\|) )?(?P<type>include|image|figure):: (?P<file>.*)$', re.M) + for match in re_rst.finditer(code): + ipath = match.group('file') + itype = match.group('type') + Logs.debug('rst: visiting %s: %s', itype, ipath) + found = False + for d in dirs: + Logs.debug('rst: looking for %s in %s', ipath, d.abspath()) + found = d.find_node(ipath) + if found: + Logs.debug('rst: found %s as %s', ipath, found.abspath()) + nodes.append((itype, found)) + if itype == 'include': + parse_rst_node(task, found, nodes, names, seen) + break + if not found: + names.append((itype, ipath)) + +class docutils(Task.Task): + """ + Compile a rst file. + """ + + def scan(self): + """ + A recursive regex-based scanner that finds rst dependencies. + """ + + nodes = [] + names = [] + seen = [] + + node = self.inputs[0] + + if not node: + return (nodes, names) + + parse_rst_node(self, node, nodes, names, seen) + + Logs.debug('rst: %r: found the following file deps: %r', self, nodes) + if names: + Logs.warn('rst: %r: could not find the following file deps: %r', self, names) + + return ([v for (t,v) in nodes], [v for (t,v) in names]) + + def check_status(self, msg, retcode): + """ + Check an exit status and raise an error with a particular message + + :param msg: message to display if the code is non-zero + :type msg: string + :param retcode: condition + :type retcode: boolean + """ + if retcode != 0: + raise Errors.WafError('%r command exit status %r' % (msg, retcode)) + + def run(self): + """ + Runs the rst compilation using docutils + """ + raise NotImplementedError() + +class rst2html(docutils): + color = 'BLUE' + + def __init__(self, *args, **kw): + docutils.__init__(self, *args, **kw) + self.command = self.generator.env.RST2HTML + self.attributes = ['stylesheet'] + + def scan(self): + nodes, names = docutils.scan(self) + + for attribute in self.attributes: + stylesheet = getattr(self.generator, attribute, None) + if stylesheet is not None: + ssnode = self.generator.to_nodes(stylesheet)[0] + nodes.append(ssnode) + Logs.debug('rst: adding dep to %s %s', attribute, stylesheet) + + return nodes, names + + def run(self): + cwdn = self.outputs[0].parent + src = self.inputs[0].path_from(cwdn) + dst = self.outputs[0].path_from(cwdn) + + cmd = self.command + [src, dst] + cmd += Utils.to_list(getattr(self.generator, 'options', [])) + for attribute in self.attributes: + stylesheet = getattr(self.generator, attribute, None) + if stylesheet is not None: + stylesheet = self.generator.to_nodes(stylesheet)[0] + cmd += ['--%s' % attribute, stylesheet.path_from(cwdn)] + + return self.exec_command(cmd, cwd=cwdn.abspath()) + +class rst2s5(rst2html): + def __init__(self, *args, **kw): + rst2html.__init__(self, *args, **kw) + self.command = self.generator.env.RST2S5 + self.attributes = ['stylesheet'] + +class rst2latex(rst2html): + def __init__(self, *args, **kw): + 
rst2html.__init__(self, *args, **kw) + self.command = self.generator.env.RST2LATEX + self.attributes = ['stylesheet'] + +class rst2xetex(rst2html): + def __init__(self, *args, **kw): + rst2html.__init__(self, *args, **kw) + self.command = self.generator.env.RST2XETEX + self.attributes = ['stylesheet'] + +class rst2pdf(docutils): + color = 'BLUE' + def run(self): + cwdn = self.outputs[0].parent + src = self.inputs[0].path_from(cwdn) + dst = self.outputs[0].path_from(cwdn) + + cmd = self.generator.env.RST2PDF + [src, '-o', dst] + cmd += Utils.to_list(getattr(self.generator, 'options', [])) + + return self.exec_command(cmd, cwd=cwdn.abspath()) + + +@feature('rst') +@before_method('process_source') +def apply_rst(self): + """ + Create :py:class:`rst` or other rst-related task objects + """ + + if self.target: + if isinstance(self.target, Node.Node): + tgt = self.target + elif isinstance(self.target, str): + tgt = self.path.get_bld().make_node(self.target) + else: + self.bld.fatal("rst: Don't know how to build target name %s which is not a string or Node for %s" % (self.target, self)) + else: + tgt = None + + tsk_type = getattr(self, 'type', None) + + src = self.to_nodes(self.source) + assert len(src) == 1 + src = src[0] + + if tsk_type is not None and tgt is None: + if tsk_type.startswith('rst2'): + ext = tsk_type[4:] + else: + self.bld.fatal("rst: Could not detect the output file extension for %s" % self) + tgt = src.change_ext('.%s' % ext) + elif tsk_type is None and tgt is not None: + out = tgt.name + ext = out[out.rfind('.')+1:] + self.type = 'rst2' + ext + elif tsk_type is not None and tgt is not None: + # the user knows what he wants + pass + else: + self.bld.fatal("rst: Need to indicate task type or target name for %s" % self) + + deps_lst = [] + + if getattr(self, 'deps', None): + deps = self.to_list(self.deps) + for filename in deps: + n = self.path.find_resource(filename) + if not n: + self.bld.fatal('Could not find %r for %r' % (filename, self)) + if not n in deps_lst: + deps_lst.append(n) + + try: + task = self.create_task(self.type, src, tgt) + except KeyError: + self.bld.fatal("rst: Task of type %s not implemented (created by %s)" % (self.type, self)) + + task.env = self.env + + # add the manual dependencies + if deps_lst: + try: + lst = self.bld.node_deps[task.uid()] + for n in deps_lst: + if not n in lst: + lst.append(n) + except KeyError: + self.bld.node_deps[task.uid()] = deps_lst + + inst_to = getattr(self, 'install_path', None) + if inst_to: + self.install_task = self.add_install_files(install_to=inst_to, install_from=task.outputs[:]) + + self.source = [] + +def configure(self): + """ + Try to find the rst programs. + + Do not raise any error if they are not found. + You'll have to use additional code in configure() to die + if programs were not found. + """ + for p in rst_progs: + self.find_program(p, mandatory=False) + diff --git a/third_party/waf/waflib/extras/run_do_script.py b/third_party/waf/waflib/extras/run_do_script.py new file mode 100644 index 0000000..07e3aa2 --- /dev/null +++ b/third_party/waf/waflib/extras/run_do_script.py @@ -0,0 +1,139 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Hans-Martin von Gaudecker, 2012 + +""" +Run a Stata do-script in the directory specified by **ctx.bldnode**. The +first and only argument will be the name of the do-script (no extension), +which can be accessed inside the do-script by the local macro `1'. Useful +for keeping a log file. 
+ +The tool uses the log file that is automatically kept by Stata only +for error-catching purposes; it will be destroyed if the task finishes +without error. In case of an error in **some_script.do**, you can inspect +it as **some_script.log** in the **ctx.bldnode** directory. + +Note that Stata will not return an error code if it exits abnormally -- +catching errors relies on parsing the log file mentioned before. Should +the parser behave incorrectly, please send an email to hmgaudecker [at] gmail. + +**WARNING** + + The tool will not work if multiple do-scripts of the same name---but in + different directories---are run at the same time! Avoid this situation. + +Usage:: + + ctx(features='run_do_script', + source='some_script.do', + target=['some_table.tex', 'some_figure.eps'], + deps='some_data.csv') +""" + + +import os, re, sys +from waflib import Task, TaskGen, Logs + +if sys.platform == 'darwin': + STATA_COMMANDS = ['Stata64MP', 'StataMP', + 'Stata64SE', 'StataSE', + 'Stata64', 'Stata'] + STATAFLAGS = '-e -q do' + STATAENCODING = 'MacRoman' +elif sys.platform.startswith('linux'): + STATA_COMMANDS = ['stata-mp', 'stata-se', 'stata'] + STATAFLAGS = '-b -q do' + # Not sure whether this is correct... + STATAENCODING = 'Latin-1' +elif sys.platform.lower().startswith('win'): + STATA_COMMANDS = ['StataMP-64', 'StataMP-ia', + 'StataMP', 'StataSE-64', + 'StataSE-ia', 'StataSE', + 'Stata-64', 'Stata-ia', + 'Stata.e', 'WMPSTATA', + 'WSESTATA', 'WSTATA'] + STATAFLAGS = '/e do' + STATAENCODING = 'Latin-1' +else: + raise Exception("Unknown sys.platform: %s " % sys.platform) + +def configure(ctx): + ctx.find_program(STATA_COMMANDS, var='STATACMD', errmsg="""\n +No Stata executable found!\n\n +If Stata is needed:\n + 1) Check the settings of your system path. + 2) Note we are looking for Stata executables called: %s + If yours has a different name, please report to hmgaudecker [at] gmail\n +Else:\n + Do not load the 'run_do_script' tool in the main wscript.\n\n""" % STATA_COMMANDS) + ctx.env.STATAFLAGS = STATAFLAGS + ctx.env.STATAENCODING = STATAENCODING + +class run_do_script_base(Task.Task): + """Run a Stata do-script from the bldnode directory.""" + run_str = '"${STATACMD}" ${STATAFLAGS} "${SRC[0].abspath()}" "${DOFILETRUNK}"' + shell = True + +class run_do_script(run_do_script_base): + """Use the log file automatically kept by Stata for error-catching. + Erase it if the task finishes without error. If not, it will show + up as do_script.log in the bldnode directory. + """ + def run(self): + run_do_script_base.run(self) + ret, log_tail = self.check_erase_log_file() + if ret: + Logs.error("""Running Stata on %r failed with code %r.\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", + self.inputs[0], ret, self.env.LOGFILEPATH, log_tail) + return ret + + def check_erase_log_file(self): + """Parse Stata's default log file and erase it if everything is okay. + + Parser is based on Brendan Halpin's shell script found here: + http://teaching.sociology.ul.ie/bhalpin/wordpress/?p=122 + """ + + if sys.version_info.major >= 3: + kwargs = {'file': self.env.LOGFILEPATH, 'mode': 'r', 'encoding': self.env.STATAENCODING} + else: + kwargs = {'name': self.env.LOGFILEPATH, 'mode': 'r'} + with open(**kwargs) as log: + log_tail = log.readlines()[-10:] + for line in log_tail: + error_found = re.match(r"r\(([0-9]+)\)", line) + if error_found: + return error_found.group(1), ''.join(log_tail) + else: + pass + # Only end up here if the parser did not identify an error.
+ os.remove(self.env.LOGFILEPATH) + return None, None + + +@TaskGen.feature('run_do_script') +@TaskGen.before_method('process_source') +def apply_run_do_script(tg): + """Task generator customising the options etc. to call Stata in batch + mode for running a do-script. + """ + + # Convert sources and targets to nodes + src_node = tg.path.find_resource(tg.source) + tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] + + tsk = tg.create_task('run_do_script', src=src_node, tgt=tgt_nodes) + tsk.env.DOFILETRUNK = os.path.splitext(src_node.name)[0] + tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s.log' % (tsk.env.DOFILETRUNK)) + + # dependencies (if the attribute 'deps' changes, trigger a recompilation) + for x in tg.to_list(getattr(tg, 'deps', [])): + node = tg.path.find_resource(x) + if not node: + tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) + tsk.dep_nodes.append(node) + Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) + + # Bypass the execution of process_source by setting the source to an empty list + tg.source = [] + diff --git a/third_party/waf/waflib/extras/run_m_script.py b/third_party/waf/waflib/extras/run_m_script.py new file mode 100644 index 0000000..b5f27eb --- /dev/null +++ b/third_party/waf/waflib/extras/run_m_script.py @@ -0,0 +1,88 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Hans-Martin von Gaudecker, 2012 + +""" +Run a Matlab script. + +Note that the script is run in the directory where it lives -- Matlab won't +allow it any other way. + +For error-catching purposes, the tool keeps its own log file, which is destroyed +if the task finishes without error. If not, it will show up as mscript_[index].log +in the bldnode directory. + +Usage:: + + ctx(features='run_m_script', + source='some_script.m', + target=['some_table.tex', 'some_figure.eps'], + deps='some_data.mat') +""" + +import os, sys +from waflib import Task, TaskGen, Logs + +MATLAB_COMMANDS = ['matlab'] + +def configure(ctx): + ctx.find_program(MATLAB_COMMANDS, var='MATLABCMD', errmsg = """\n +No Matlab executable found!\n\n +If Matlab is needed:\n + 1) Check the settings of your system path. + 2) Note we are looking for Matlab executables called: %s + If yours has a different name, please report to hmgaudecker [at] gmail\n +Else:\n + Do not load the 'run_m_script' tool in the main wscript.\n\n""" % MATLAB_COMMANDS) + ctx.env.MATLABFLAGS = '-wait -nojvm -nosplash -minimize' + +class run_m_script_base(Task.Task): + """Run a Matlab script.""" + run_str = '"${MATLABCMD}" ${MATLABFLAGS} -logfile "${LOGFILEPATH}" -r "try, ${MSCRIPTTRUNK}, exit(0), catch err, disp(err.getReport()), exit(1), end"' + shell = True + +class run_m_script(run_m_script_base): + """Erase the Matlab overall log file if everything went okay, else raise an + error and print its last 10 lines. + """ + def run(self): + ret = run_m_script_base.run(self) + logfile = self.env.LOGFILEPATH + if ret: + mode = 'r' + if sys.version_info.major >= 3: + mode = 'rb' + with open(logfile, mode=mode) as f: + tail = f.readlines()[-10:] + Logs.error("""Running Matlab on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", + self.inputs[0], ret, logfile, '\n'.join(tail)) + else: + os.remove(logfile) + return ret + +@TaskGen.feature('run_m_script') +@TaskGen.before_method('process_source') +def apply_run_m_script(tg): + """Task generator customising the options etc. to call Matlab in batch + mode for running an m-script.
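+
+	The log file is written to the build directory as ``<scriptname>_<idx>.log``
+	(see ``LOGFILEPATH`` below) and is removed again when the script finishes
+	without error.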
+    """
+
+    # Convert sources and targets to nodes
+    src_node = tg.path.find_resource(tg.source)
+    tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
+
+    tsk = tg.create_task('run_m_script', src=src_node, tgt=tgt_nodes)
+    tsk.cwd = src_node.parent.abspath()
+    tsk.env.MSCRIPTTRUNK = os.path.splitext(src_node.name)[0]
+    tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (tsk.env.MSCRIPTTRUNK, tg.idx))
+
+    # dependencies (if the attribute 'deps' changes, trigger a recompilation)
+    for x in tg.to_list(getattr(tg, 'deps', [])):
+        node = tg.path.find_resource(x)
+        if not node:
+            tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath()))
+        tsk.dep_nodes.append(node)
+    Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath())
+
+    # Bypass the execution of process_source by setting the source to an empty list
+    tg.source = []
diff --git a/third_party/waf/waflib/extras/run_py_script.py b/third_party/waf/waflib/extras/run_py_script.py
new file mode 100644
index 0000000..3670381
--- /dev/null
+++ b/third_party/waf/waflib/extras/run_py_script.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Hans-Martin von Gaudecker, 2012
+
+"""
+Run a Python script in the directory specified by **ctx.bldnode**.
+
+Select a Python version by specifying the **version** keyword for
+the task generator instance as integer 2 or 3. Default is 3.
+
+If the build environment has an attribute "PROJECT_PATHS" with
+a key "PROJECT_ROOT", its value will be appended to the PYTHONPATH.
+The same applies to a string passed via the optional **add_to_pythonpath**
+keyword (appended after the PROJECT_ROOT).
+
+Usage::
+
+    ctx(features='run_py_script', version=3,
+        source='some_script.py',
+        target=['some_table.tex', 'some_figure.eps'],
+        deps='some_data.csv',
+        add_to_pythonpath='src/some/library')
+"""
+
+import os, re
+from waflib import Task, TaskGen, Logs
+
+
+def configure(conf):
+    """TODO: Might need to be updated for Windows once
+    "PEP 397":http://www.python.org/dev/peps/pep-0397/ is settled.
+    """
+    conf.find_program('python', var='PY2CMD', mandatory=False)
+    conf.find_program('python3', var='PY3CMD', mandatory=False)
+    if not conf.env.PY2CMD and not conf.env.PY3CMD:
+        conf.fatal("No Python interpreter found!")
+
+class run_py_2_script(Task.Task):
+    """Run a Python 2 script."""
+    run_str = '${PY2CMD} ${SRC[0].abspath()}'
+    shell=True
+
+class run_py_3_script(Task.Task):
+    """Run a Python 3 script."""
+    run_str = '${PY3CMD} ${SRC[0].abspath()}'
+    shell=True
+
+@TaskGen.feature('run_py_script')
+@TaskGen.before_method('process_source')
+def apply_run_py_script(tg):
+    """Task generator for running either Python 2 or Python 3 on a single
+    script.
+
+    Attributes:
+
+        * source -- A **single** source node or string. (required)
+        * target -- A single target or list of targets (nodes or strings)
+        * deps -- A single dependency or list of dependencies (nodes or strings)
+        * add_to_pythonpath -- A string that will be appended to the PYTHONPATH environment variable
+
+    If the build environment has an attribute "PROJECT_PATHS" with
+    a key "PROJECT_ROOT", its value will be appended to the PYTHONPATH.
+    """
+
+    # Set the Python version to use, default to 3.
+    v = getattr(tg, 'version', 3)
+    if v not in (2, 3):
+        raise ValueError("Specify the 'version' attribute for run_py_script task generator as integer 2 or 3.\n Got: %s" %v)
+
+    # Convert sources and targets to nodes
+    src_node = tg.path.find_resource(tg.source)
+    tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)]
+
+    # Create the task.
+    tsk = tg.create_task('run_py_%d_script' %v, src=src_node, tgt=tgt_nodes)
+
+    # custom execution environment
+    # TODO use a list and os.sep.join(lst) at the end instead of concatenating strings
+    tsk.env.env = dict(os.environ)
+    tsk.env.env['PYTHONPATH'] = tsk.env.env.get('PYTHONPATH', '')
+    project_paths = getattr(tsk.env, 'PROJECT_PATHS', None)
+    if project_paths and 'PROJECT_ROOT' in project_paths:
+        tsk.env.env['PYTHONPATH'] += os.pathsep + project_paths['PROJECT_ROOT'].abspath()
+    if getattr(tg, 'add_to_pythonpath', None):
+        tsk.env.env['PYTHONPATH'] += os.pathsep + tg.add_to_pythonpath
+
+    # Clean up the PYTHONPATH -- replace double occurrences of path separator
+    tsk.env.env['PYTHONPATH'] = re.sub(os.pathsep + '+', os.pathsep, tsk.env.env['PYTHONPATH'])
+
+    # Clean up the PYTHONPATH -- doesn't like starting with path separator
+    if tsk.env.env['PYTHONPATH'].startswith(os.pathsep):
+        tsk.env.env['PYTHONPATH'] = tsk.env.env['PYTHONPATH'][1:]
+
+    # dependencies (if the attribute 'deps' changes, trigger a recompilation)
+    for x in tg.to_list(getattr(tg, 'deps', [])):
+        node = tg.path.find_resource(x)
+        if not node:
+            tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath()))
+        tsk.dep_nodes.append(node)
+    Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath())
+
+    # Bypass the execution of process_source by setting the source to an empty list
+    tg.source = []
+
diff --git a/third_party/waf/waflib/extras/run_r_script.py b/third_party/waf/waflib/extras/run_r_script.py
new file mode 100644
index 0000000..b0d8f2b
--- /dev/null
+++ b/third_party/waf/waflib/extras/run_r_script.py
@@ -0,0 +1,86 @@
+#!/usr/bin/env python
+# encoding: utf-8
+# Hans-Martin von Gaudecker, 2012
+
+"""
+Run an R script in the directory specified by **ctx.bldnode**.
+
+For error-catching purposes, the tool keeps its own log file, which is
+destroyed if the task finishes without error. Otherwise it shows up as
+rscript_[index].log in the bldnode directory.
+
+Usage::
+
+    ctx(features='run_r_script',
+        source='some_script.r',
+        target=['some_table.tex', 'some_figure.eps'],
+        deps='some_data.csv')
+"""
+
+
+import os, sys
+from waflib import Task, TaskGen, Logs
+
+R_COMMANDS = ['RTerm', 'R', 'r']
+
+def configure(ctx):
+    ctx.find_program(R_COMMANDS, var='RCMD', errmsg = """\n
+No R executable found!\n\n
+If R is needed:\n
+    1) Check the settings of your system path.
+    2) Note we are looking for R executables called: %s
+       If yours has a different name, please report to hmgaudecker [at] gmail\n
+Else:\n
+    Do not load the 'run_r_script' tool in the main wscript.\n\n""" % R_COMMANDS)
+    ctx.env.RFLAGS = 'CMD BATCH --slave'
+
+class run_r_script_base(Task.Task):
+    """Run an R script."""
+    run_str = '"${RCMD}" ${RFLAGS} "${SRC[0].abspath()}" "${LOGFILEPATH}"'
+    shell = True
+
+class run_r_script(run_r_script_base):
+    """Erase the R overall log file if everything went okay, else raise an
+    error and print its last 10 lines.
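+
+    A sketch of the underlying command (file names are illustrative)::
+
+        R CMD BATCH --slave some_script.r some_script_1.log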
+ """ + def run(self): + ret = run_r_script_base.run(self) + logfile = self.env.LOGFILEPATH + if ret: + mode = 'r' + if sys.version_info.major >= 3: + mode = 'rb' + with open(logfile, mode=mode) as f: + tail = f.readlines()[-10:] + Logs.error("""Running R on %r returned the error %r\n\nCheck the log file %s, last 10 lines\n\n%s\n\n\n""", + self.inputs[0], ret, logfile, '\n'.join(tail)) + else: + os.remove(logfile) + return ret + + +@TaskGen.feature('run_r_script') +@TaskGen.before_method('process_source') +def apply_run_r_script(tg): + """Task generator customising the options etc. to call R in batch + mode for running a R script. + """ + + # Convert sources and targets to nodes + src_node = tg.path.find_resource(tg.source) + tgt_nodes = [tg.path.find_or_declare(t) for t in tg.to_list(tg.target)] + + tsk = tg.create_task('run_r_script', src=src_node, tgt=tgt_nodes) + tsk.env.LOGFILEPATH = os.path.join(tg.bld.bldnode.abspath(), '%s_%d.log' % (os.path.splitext(src_node.name)[0], tg.idx)) + + # dependencies (if the attribute 'deps' changes, trigger a recompilation) + for x in tg.to_list(getattr(tg, 'deps', [])): + node = tg.path.find_resource(x) + if not node: + tg.bld.fatal('Could not find dependency %r for running %r' % (x, src_node.abspath())) + tsk.dep_nodes.append(node) + Logs.debug('deps: found dependencies %r for running %r', tsk.dep_nodes, src_node.abspath()) + + # Bypass the execution of process_source by setting the source to an empty list + tg.source = [] + diff --git a/third_party/waf/waflib/extras/sas.py b/third_party/waf/waflib/extras/sas.py new file mode 100644 index 0000000..754c614 --- /dev/null +++ b/third_party/waf/waflib/extras/sas.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Mark Coggeshall, 2010 + +"SAS support" + +import os +from waflib import Task, Errors, Logs +from waflib.TaskGen import feature, before_method + +sas_fun, _ = Task.compile_fun('sas -sysin ${SRCFILE} -log ${LOGFILE} -print ${LSTFILE}', shell=False) + +class sas(Task.Task): + vars = ['SAS', 'SASFLAGS'] + def run(task): + command = 'SAS' + fun = sas_fun + + node = task.inputs[0] + logfilenode = node.change_ext('.log') + lstfilenode = node.change_ext('.lst') + + # set the cwd + task.cwd = task.inputs[0].parent.get_src().abspath() + Logs.debug('runner: %r on %r', command, node) + + SASINPUTS = node.parent.get_bld().abspath() + os.pathsep + node.parent.get_src().abspath() + os.pathsep + task.env.env = {'SASINPUTS': SASINPUTS} + + task.env.SRCFILE = node.abspath() + task.env.LOGFILE = logfilenode.abspath() + task.env.LSTFILE = lstfilenode.abspath() + ret = fun(task) + if ret: + Logs.error('Running %s on %r returned a non-zero exit', command, node) + Logs.error('SRCFILE = %r', node) + Logs.error('LOGFILE = %r', logfilenode) + Logs.error('LSTFILE = %r', lstfilenode) + return ret + +@feature('sas') +@before_method('process_source') +def apply_sas(self): + if not getattr(self, 'type', None) in ('sas',): + self.type = 'sas' + + self.env['logdir'] = getattr(self, 'logdir', 'log') + self.env['lstdir'] = getattr(self, 'lstdir', 'lst') + + deps_lst = [] + + if getattr(self, 'deps', None): + deps = self.to_list(self.deps) + for filename in deps: + n = self.path.find_resource(filename) + if not n: + n = self.bld.root.find_resource(filename) + if not n: + raise Errors.WafError('cannot find input file %s for processing' % filename) + if not n in deps_lst: + deps_lst.append(n) + + for node in self.to_nodes(self.source): + if self.type == 'sas': + task = self.create_task('sas', src=node) + task.dep_nodes 
= deps_lst
+    self.source = []
+
+def configure(self):
+    self.find_program('sas', var='SAS', mandatory=False)
+
diff --git a/third_party/waf/waflib/extras/satellite_assembly.py b/third_party/waf/waflib/extras/satellite_assembly.py
new file mode 100644
index 0000000..005eb07
--- /dev/null
+++ b/third_party/waf/waflib/extras/satellite_assembly.py
@@ -0,0 +1,57 @@
+#!/usr/bin/python
+# encoding: utf-8
+# vim: tabstop=4 noexpandtab
+
+"""
+Create a satellite assembly from "*.??.txt" files. ?? stands for a language code.
+
+The project's Resources subfolder contains resources.??.txt string files for several languages.
+The build folder will hold the satellite assemblies as ./??/ExeName.resources.dll
+
+The 'gen' parameter names the template assembly (the parameter is called gen
+because the tool also uses resx.py):
+bld(source='Resources/resources.de.txt',gen=ExeName)
+"""
+
+import os, re
+from waflib import Task
+from waflib.TaskGen import feature,before_method
+
+class al(Task.Task):
+    run_str = '${AL} ${ALFLAGS}'
+
+@feature('satellite_assembly')
+@before_method('process_source')
+def satellite_assembly(self):
+    if not getattr(self, 'gen', None):
+        self.bld.fatal('satellite_assembly needs a template assembly provided with the "gen" parameter')
+    res_lang = re.compile(r'(.*)\.(\w\w)\.(?:resx|txt)',flags=re.I)
+
+    # self.source can contain node objects, so this will break in one way or another
+    self.source = self.to_list(self.source)
+    for i, x in enumerate(self.source):
+        #x = 'resources/resources.de.resx'
+        #x = 'resources/resources.de.txt'
+        mo = res_lang.match(x)
+        if mo:
+            template = os.path.splitext(self.gen)[0]
+            templatedir, templatename = os.path.split(template)
+            res = mo.group(1)
+            lang = mo.group(2)
+            #./Resources/resources.de.resources
+            resources = self.path.find_or_declare(res+ '.'
+ lang + '.resources') + self.create_task('resgen', self.to_nodes(x), [resources]) + #./de/Exename.resources.dll + satellite = self.path.find_or_declare(os.path.join(templatedir,lang,templatename) + '.resources.dll') + tsk = self.create_task('al',[resources],[satellite]) + tsk.env.append_value('ALFLAGS','/template:'+os.path.join(self.path.relpath(),self.gen)) + tsk.env.append_value('ALFLAGS','/embed:'+resources.relpath()) + tsk.env.append_value('ALFLAGS','/culture:'+lang) + tsk.env.append_value('ALFLAGS','/out:'+satellite.relpath()) + self.source[i] = None + # remove the None elements that we just substituted + self.source = list(filter(lambda x:x, self.source)) + +def configure(ctx): + ctx.find_program('al', var='AL', mandatory=True) + ctx.load('resx') + diff --git a/third_party/waf/waflib/extras/scala.py b/third_party/waf/waflib/extras/scala.py new file mode 100644 index 0000000..a9880f0 --- /dev/null +++ b/third_party/waf/waflib/extras/scala.py @@ -0,0 +1,128 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010 (ita) + +""" +Scala support + +scalac outputs files a bit where it wants to +""" + +import os +from waflib import Task, Utils, Node +from waflib.TaskGen import feature, before_method, after_method + +from waflib.Tools import ccroot +ccroot.USELIB_VARS['scalac'] = set(['CLASSPATH', 'SCALACFLAGS']) + +from waflib.Tools import javaw + +@feature('scalac') +@before_method('process_source') +def apply_scalac(self): + + Utils.def_attrs(self, jarname='', classpath='', + sourcepath='.', srcdir='.', + jar_mf_attributes={}, jar_mf_classpath=[]) + + outdir = getattr(self, 'outdir', None) + if outdir: + if not isinstance(outdir, Node.Node): + outdir = self.path.get_bld().make_node(self.outdir) + else: + outdir = self.path.get_bld() + outdir.mkdir() + self.env['OUTDIR'] = outdir.abspath() + + self.scalac_task = tsk = self.create_task('scalac') + tmp = [] + + srcdir = getattr(self, 'srcdir', '') + if isinstance(srcdir, Node.Node): + srcdir = [srcdir] + for x in Utils.to_list(srcdir): + if isinstance(x, Node.Node): + y = x + else: + y = self.path.find_dir(x) + if not y: + self.bld.fatal('Could not find the folder %s from %s' % (x, self.path)) + tmp.append(y) + tsk.srcdir = tmp + +# reuse some code +feature('scalac')(javaw.use_javac_files) +after_method('apply_scalac')(javaw.use_javac_files) + +feature('scalac')(javaw.set_classpath) +after_method('apply_scalac', 'use_scalac_files')(javaw.set_classpath) + + +SOURCE_RE = '**/*.scala' +class scalac(javaw.javac): + color = 'GREEN' + vars = ['CLASSPATH', 'SCALACFLAGS', 'SCALAC', 'OUTDIR'] + + def runnable_status(self): + """ + Wait for dependent tasks to be complete, then read the file system to find the input nodes. 
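+		Because scalac chooses output locations itself, the inputs are found
+		here by globbing '**/*.scala' under each srcdir, once all preceding
+		tasks have finished.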
+ """ + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + + if not self.inputs: + global SOURCE_RE + self.inputs = [] + for x in self.srcdir: + self.inputs.extend(x.ant_glob(SOURCE_RE, remove=False)) + return super(javaw.javac, self).runnable_status() + + def run(self): + """ + Execute the scalac compiler + """ + env = self.env + gen = self.generator + bld = gen.bld + wd = bld.bldnode.abspath() + def to_list(xx): + if isinstance(xx, str): + return [xx] + return xx + self.last_cmd = lst = [] + lst.extend(to_list(env['SCALAC'])) + lst.extend(['-classpath']) + lst.extend(to_list(env['CLASSPATH'])) + lst.extend(['-d']) + lst.extend(to_list(env['OUTDIR'])) + lst.extend(to_list(env['SCALACFLAGS'])) + lst.extend([a.abspath() for a in self.inputs]) + lst = [x for x in lst if x] + try: + self.out = self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None, output=0, quiet=0)[1] + except: + self.generator.bld.cmd_and_log(lst, cwd=wd, env=env.env or None) + +def configure(self): + """ + Detect the scalac program + """ + # If SCALA_HOME is set, we prepend it to the path list + java_path = self.environ['PATH'].split(os.pathsep) + v = self.env + + if 'SCALA_HOME' in self.environ: + java_path = [os.path.join(self.environ['SCALA_HOME'], 'bin')] + java_path + self.env['SCALA_HOME'] = [self.environ['SCALA_HOME']] + + for x in 'scalac scala'.split(): + self.find_program(x, var=x.upper(), path_list=java_path) + + if 'CLASSPATH' in self.environ: + v['CLASSPATH'] = self.environ['CLASSPATH'] + + v.SCALACFLAGS = ['-verbose'] + if not v['SCALAC']: + self.fatal('scalac is required for compiling scala classes') + diff --git a/third_party/waf/waflib/extras/slow_qt4.py b/third_party/waf/waflib/extras/slow_qt4.py new file mode 100644 index 0000000..ec7880b --- /dev/null +++ b/third_party/waf/waflib/extras/slow_qt4.py @@ -0,0 +1,96 @@ +#! /usr/bin/env python +# Thomas Nagy, 2011 (ita) + +""" +Create _moc.cpp files + +The builds are 30-40% faster when .moc files are included, +you should NOT use this tool. If you really +really want it: + +def configure(conf): + conf.load('compiler_cxx qt4') + conf.load('slow_qt4') + +See playground/slow_qt/wscript for a complete example. +""" + +from waflib.TaskGen import extension +from waflib import Task +import waflib.Tools.qt4 +import waflib.Tools.cxx + +@extension(*waflib.Tools.qt4.EXT_QT4) +def cxx_hook(self, node): + return self.create_compiled_task('cxx_qt', node) + +class cxx_qt(Task.classes['cxx']): + def runnable_status(self): + ret = Task.classes['cxx'].runnable_status(self) + if ret != Task.ASK_LATER and not getattr(self, 'moc_done', None): + + try: + cache = self.generator.moc_cache + except AttributeError: + cache = self.generator.moc_cache = {} + + deps = self.generator.bld.node_deps[self.uid()] + for x in [self.inputs[0]] + deps: + if x.read().find('Q_OBJECT') > 0: + + # process "foo.h -> foo.moc" only if "foo.cpp" is in the sources for the current task generator + # this code will work because it is in the main thread (runnable_status) + if x.name.rfind('.') > -1: # a .h file... 
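+					# Take the stem and only continue if some compiled task
+					# builds a source with the same stem (foo.h -> foo.cpp).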
+ name = x.name[:x.name.rfind('.')] + for tsk in self.generator.compiled_tasks: + if tsk.inputs and tsk.inputs[0].name.startswith(name): + break + else: + # no corresponding file, continue + continue + + # the file foo.cpp could be compiled for a static and a shared library - hence the %number in the name + cxx_node = x.parent.get_bld().make_node(x.name.replace('.', '_') + '_%d_moc.cpp' % self.generator.idx) + if cxx_node in cache: + continue + cache[cxx_node] = self + + tsk = Task.classes['moc'](env=self.env, generator=self.generator) + tsk.set_inputs(x) + tsk.set_outputs(cxx_node) + + if x.name.endswith('.cpp'): + # moc is trying to be too smart but it is too dumb: + # why forcing the #include when Q_OBJECT is in the cpp file? + gen = self.generator.bld.producer + gen.outstanding.append(tsk) + gen.total += 1 + self.set_run_after(tsk) + else: + cxxtsk = Task.classes['cxx'](env=self.env, generator=self.generator) + cxxtsk.set_inputs(tsk.outputs) + cxxtsk.set_outputs(cxx_node.change_ext('.o')) + cxxtsk.set_run_after(tsk) + + try: + self.more_tasks.extend([tsk, cxxtsk]) + except AttributeError: + self.more_tasks = [tsk, cxxtsk] + + try: + link = self.generator.link_task + except AttributeError: + pass + else: + link.set_run_after(cxxtsk) + link.inputs.extend(cxxtsk.outputs) + link.inputs.sort(key=lambda x: x.abspath()) + + self.moc_done = True + + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + + return ret + diff --git a/third_party/waf/waflib/extras/softlink_libs.py b/third_party/waf/waflib/extras/softlink_libs.py new file mode 100644 index 0000000..50c777f --- /dev/null +++ b/third_party/waf/waflib/extras/softlink_libs.py @@ -0,0 +1,76 @@ +#! /usr/bin/env python +# per rosengren 2011 + +from waflib.TaskGen import feature, after_method +from waflib.Task import Task, always_run +from os.path import basename, isabs +from os import tmpfile, linesep + +def options(opt): + grp = opt.add_option_group('Softlink Libraries Options') + grp.add_option('--exclude', default='/usr/lib,/lib', help='No symbolic links are created for libs within [%default]') + +def configure(cnf): + cnf.find_program('ldd') + if not cnf.env.SOFTLINK_EXCLUDE: + cnf.env.SOFTLINK_EXCLUDE = cnf.options.exclude.split(',') + +@feature('softlink_libs') +@after_method('process_rule') +def add_finder(self): + tgt = self.path.find_or_declare(self.target) + self.create_task('sll_finder', tgt=tgt) + self.create_task('sll_installer', tgt=tgt) + always_run(sll_installer) + +class sll_finder(Task): + ext_out = 'softlink_libs' + def run(self): + bld = self.generator.bld + linked=[] + target_paths = [] + for g in bld.groups: + for tgen in g: + # FIXME it might be better to check if there is a link_task (getattr?) 
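+				# NB: 'tmpfile' (imported from os above, used below) exists on
+				# Python 2 only; tempfile.TemporaryFile would be the Python 3
+				# equivalent.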
+ target_paths += [tgen.path.get_bld().bldpath()] + linked += [t.outputs[0].bldpath() + for t in getattr(tgen, 'tasks', []) + if t.__class__.__name__ in + ['cprogram', 'cshlib', 'cxxprogram', 'cxxshlib']] + lib_list = [] + if len(linked): + cmd = [self.env.LDD] + linked + # FIXME add DYLD_LIBRARY_PATH+PATH for osx+win32 + ldd_env = {'LD_LIBRARY_PATH': ':'.join(target_paths + self.env.LIBPATH)} + # FIXME the with syntax will not work in python 2 + with tmpfile() as result: + self.exec_command(cmd, env=ldd_env, stdout=result) + result.seek(0) + for line in result.readlines(): + words = line.split() + if len(words) < 3 or words[1] != '=>': + continue + lib = words[2] + if lib == 'not': + continue + if any([lib.startswith(p) for p in + [bld.bldnode.abspath(), '('] + + self.env.SOFTLINK_EXCLUDE]): + continue + if not isabs(lib): + continue + lib_list.append(lib) + lib_list = sorted(set(lib_list)) + self.outputs[0].write(linesep.join(lib_list + self.env.DYNAMIC_LIBS)) + return 0 + +class sll_installer(Task): + ext_in = 'softlink_libs' + def run(self): + tgt = self.outputs[0] + self.generator.bld.install_files('${LIBDIR}', tgt, postpone=False) + lib_list=tgt.read().split() + for lib in lib_list: + self.generator.bld.symlink_as('${LIBDIR}/'+basename(lib), lib, postpone=False) + return 0 + diff --git a/third_party/waf/waflib/extras/sphinx.py b/third_party/waf/waflib/extras/sphinx.py new file mode 100644 index 0000000..08f3cfd --- /dev/null +++ b/third_party/waf/waflib/extras/sphinx.py @@ -0,0 +1,120 @@ +"""Support for Sphinx documentation + +This is a wrapper for sphinx-build program. Please note that sphinx-build supports only +one output format at a time, but the tool can create multiple tasks to handle more. +The output formats can be passed via the sphinx_output_format, which is an array of +strings. For backwards compatibility if only one output is needed, it can be passed +as a single string. +The default output format is html. + +Specific formats can be installed in different directories by specifying the +install_path_<FORMAT> attribute. If not defined, the standard install_path +will be used instead. + +Example wscript: + +def configure(cnf): + conf.load('sphinx') + +def build(bld): + bld( + features='sphinx', + sphinx_source='sources', # path to source directory + sphinx_options='-a -v', # sphinx-build program additional options + sphinx_output_format=['html', 'man'], # output format of sphinx documentation + install_path_man='${DOCDIR}/man' # put man pages in a specific directory + ) + +""" + +from waflib.Node import Node +from waflib import Utils +from waflib import Task +from waflib.TaskGen import feature, after_method + + +def configure(cnf): + """Check if sphinx-build program is available and loads gnu_dirs tool.""" + cnf.find_program('sphinx-build', var='SPHINX_BUILD', mandatory=False) + cnf.load('gnu_dirs') + + +@feature('sphinx') +def build_sphinx(self): + """Builds sphinx sources. 
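+
+	One SphinxBuildingTask is created per entry in sphinx_output_format;
+	each writes into its own <build>/<format> directory and is installed
+	to install_path_<FORMAT> if that attribute is set, else to a
+	format-specific default.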
+ """ + if not self.env.SPHINX_BUILD: + self.bld.fatal('Program SPHINX_BUILD not defined.') + if not getattr(self, 'sphinx_source', None): + self.bld.fatal('Attribute sphinx_source not defined.') + if not isinstance(self.sphinx_source, Node): + self.sphinx_source = self.path.find_node(self.sphinx_source) + if not self.sphinx_source: + self.bld.fatal('Can\'t find sphinx_source: %r' % self.sphinx_source) + + # In the taskgen we have the complete list of formats + Utils.def_attrs(self, sphinx_output_format='html') + self.sphinx_output_format = Utils.to_list(self.sphinx_output_format) + + self.env.SPHINX_OPTIONS = getattr(self, 'sphinx_options', []) + + for source_file in self.sphinx_source.ant_glob('**/*'): + self.bld.add_manual_dependency(self.sphinx_source, source_file) + + for cfmt in self.sphinx_output_format: + sphinx_build_task = self.create_task('SphinxBuildingTask') + sphinx_build_task.set_inputs(self.sphinx_source) + # In task we keep the specific format this task is generating + sphinx_build_task.env.SPHINX_OUTPUT_FORMAT = cfmt + + # the sphinx-build results are in <build + output_format> directory + sphinx_build_task.sphinx_output_directory = self.path.get_bld().make_node(cfmt) + sphinx_build_task.set_outputs(sphinx_build_task.sphinx_output_directory) + sphinx_build_task.sphinx_output_directory.mkdir() + + Utils.def_attrs(sphinx_build_task, install_path=getattr(self, 'install_path_' + cfmt, getattr(self, 'install_path', get_install_path(sphinx_build_task)))) + + +def get_install_path(object): + if object.env.SPHINX_OUTPUT_FORMAT == 'man': + return object.env.MANDIR + elif object.env.SPHINX_OUTPUT_FORMAT == 'info': + return object.env.INFODIR + else: + return object.env.DOCDIR + + +class SphinxBuildingTask(Task.Task): + color = 'BOLD' + run_str = '${SPHINX_BUILD} -M ${SPHINX_OUTPUT_FORMAT} ${SRC} ${TGT} -d ${TGT[0].bld_dir()}/doctrees-${SPHINX_OUTPUT_FORMAT} ${SPHINX_OPTIONS}' + + def keyword(self): + return 'Compiling (%s)' % self.env.SPHINX_OUTPUT_FORMAT + + def runnable_status(self): + + for x in self.run_after: + if not x.hasrun: + return Task.ASK_LATER + + self.signature() + ret = Task.Task.runnable_status(self) + if ret == Task.SKIP_ME: + # in case the files were removed + self.add_install() + return ret + + + def post_run(self): + self.add_install() + return Task.Task.post_run(self) + + + def add_install(self): + nodes = self.sphinx_output_directory.ant_glob('**/*', quiet=True) + self.outputs += nodes + self.generator.add_install_files(install_to=self.install_path, + install_from=nodes, + postpone=False, + cwd=self.sphinx_output_directory.make_node(self.env.SPHINX_OUTPUT_FORMAT), + relative_trick=True) diff --git a/third_party/waf/waflib/extras/stale.py b/third_party/waf/waflib/extras/stale.py new file mode 100644 index 0000000..cac3f46 --- /dev/null +++ b/third_party/waf/waflib/extras/stale.py @@ -0,0 +1,98 @@ +#! /usr/bin/env python +# encoding: UTF-8 +# Thomas Nagy, 2006-2015 (ita) + +""" +Add a pre-build hook to remove build files (declared in the system) +that do not have a corresponding target + +This can be used for example to remove the targets +that have changed name without performing +a full 'waf clean' + +Of course, it will only work if there are no dynamically generated +nodes/tasks, in which case the method will have to be modified +to exclude some folders for example. 
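+
+A sketch of enabling it from a wscript (importing the module is sufficient,
+since it wraps Parallel.refill_task_list at import time)::
+
+    def build(bld):
+        bld.load('stale')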
+ +Make sure to set bld.post_mode = waflib.Build.POST_AT_ONCE +""" + +from waflib import Logs, Build +from waflib.Runner import Parallel + +DYNAMIC_EXT = [] # add your non-cleanable files/extensions here +MOC_H_EXTS = '.cpp .cxx .hpp .hxx .h'.split() + +def can_delete(node): + """Imperfect moc cleanup which does not look for a Q_OBJECT macro in the files""" + if not node.name.endswith('.moc'): + return True + base = node.name[:-4] + p1 = node.parent.get_src() + p2 = node.parent.get_bld() + for k in MOC_H_EXTS: + h_name = base + k + n = p1.search_node(h_name) + if n: + return False + n = p2.search_node(h_name) + if n: + return False + + # foo.cpp.moc, foo.h.moc, etc. + if base.endswith(k): + return False + + return True + +# recursion over the nodes to find the stale files +def stale_rec(node, nodes): + if node.abspath() in node.ctx.env[Build.CFG_FILES]: + return + + if getattr(node, 'children', []): + for x in node.children.values(): + if x.name != "c4che": + stale_rec(x, nodes) + else: + for ext in DYNAMIC_EXT: + if node.name.endswith(ext): + break + else: + if not node in nodes: + if can_delete(node): + Logs.warn('Removing stale file -> %r', node) + node.delete() + +old = Parallel.refill_task_list +def refill_task_list(self): + iit = old(self) + bld = self.bld + + # execute this operation only once + if getattr(self, 'stale_done', False): + return iit + self.stale_done = True + + # this does not work in partial builds + if bld.targets != '*': + return iit + + # this does not work in dynamic builds + if getattr(bld, 'post_mode') == Build.POST_AT_ONCE: + return iit + + # obtain the nodes to use during the build + nodes = [] + for tasks in bld.groups: + for x in tasks: + try: + nodes.extend(x.outputs) + except AttributeError: + pass + + stale_rec(bld.bldnode, nodes) + return iit + +Parallel.refill_task_list = refill_task_list + diff --git a/third_party/waf/waflib/extras/stracedeps.py b/third_party/waf/waflib/extras/stracedeps.py new file mode 100644 index 0000000..37d82cb --- /dev/null +++ b/third_party/waf/waflib/extras/stracedeps.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2015 (ita) + +""" +Execute tasks through strace to obtain dependencies after the process is run. This +scheme is similar to that of the Fabricate script. 
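+
+Each command is wrapped in 'strace -f -e trace=process,file' and the
+resulting log is parsed for file accesses, which then become the task's
+dependencies.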
+ +To use:: + + def configure(conf): + conf.load('strace') + +WARNING: +* This will not work when advanced scanners are needed (qt4/qt5) +* The overhead of running 'strace' is significant (56s -> 1m29s) +* It will not work on Windows :-) +""" + +import os, re, threading +from waflib import Task, Logs, Utils + +#TRACECALLS = 'trace=access,chdir,clone,creat,execve,exit_group,fork,lstat,lstat64,mkdir,open,rename,stat,stat64,symlink,vfork' +TRACECALLS = 'trace=process,file' + +BANNED = ('/tmp', '/proc', '/sys', '/dev') + +s_process = r'(?:clone|fork|vfork)\(.*?(?P<npid>\d+)' +s_file = r'(?P<call>\w+)\("(?P<path>([^"\\]|\\.)*)"(.*)' +re_lines = re.compile(r'^(?P<pid>\d+)\s+(?:(?:%s)|(?:%s))\r*$' % (s_file, s_process), re.IGNORECASE | re.MULTILINE) +strace_lock = threading.Lock() + +def configure(conf): + conf.find_program('strace') + +def task_method(func): + # Decorator function to bind/replace methods on the base Task class + # + # The methods Task.exec_command and Task.sig_implicit_deps already exists and are rarely overridden + # we thus expect that we are the only ones doing this + try: + setattr(Task.Task, 'nostrace_%s' % func.__name__, getattr(Task.Task, func.__name__)) + except AttributeError: + pass + setattr(Task.Task, func.__name__, func) + return func + +@task_method +def get_strace_file(self): + try: + return self.strace_file + except AttributeError: + pass + + if self.outputs: + ret = self.outputs[0].abspath() + '.strace' + else: + ret = '%s%s%d%s' % (self.generator.bld.bldnode.abspath(), os.sep, id(self), '.strace') + self.strace_file = ret + return ret + +@task_method +def get_strace_args(self): + return (self.env.STRACE or ['strace']) + ['-e', TRACECALLS, '-f', '-o', self.get_strace_file()] + +@task_method +def exec_command(self, cmd, **kw): + bld = self.generator.bld + if not 'cwd' in kw: + kw['cwd'] = self.get_cwd() + + args = self.get_strace_args() + fname = self.get_strace_file() + if isinstance(cmd, list): + cmd = args + cmd + else: + cmd = '%s %s' % (' '.join(args), cmd) + + try: + ret = bld.exec_command(cmd, **kw) + finally: + if not ret: + self.parse_strace_deps(fname, kw['cwd']) + return ret + +@task_method +def sig_implicit_deps(self): + # bypass the scanner functions + return + +@task_method +def parse_strace_deps(self, path, cwd): + # uncomment the following line to disable the dependencies and force a file scan + # return + try: + cnt = Utils.readf(path) + finally: + try: + os.remove(path) + except OSError: + pass + + if not isinstance(cwd, str): + cwd = cwd.abspath() + + nodes = [] + bld = self.generator.bld + try: + cache = bld.strace_cache + except AttributeError: + cache = bld.strace_cache = {} + + # chdir and relative paths + pid_to_cwd = {} + + global BANNED + done = set() + for m in re.finditer(re_lines, cnt): + # scraping the output of strace + pid = m.group('pid') + if m.group('npid'): + npid = m.group('npid') + pid_to_cwd[npid] = pid_to_cwd.get(pid, cwd) + continue + + p = m.group('path').replace('\\"', '"') + + if p == '.' 
or m.group().find('= -1 ENOENT') > -1: + # just to speed it up a bit + continue + + if not os.path.isabs(p): + p = os.path.join(pid_to_cwd.get(pid, cwd), p) + + call = m.group('call') + if call == 'chdir': + pid_to_cwd[pid] = p + continue + + if p in done: + continue + done.add(p) + + for x in BANNED: + if p.startswith(x): + break + else: + if p.endswith('/') or os.path.isdir(p): + continue + + try: + node = cache[p] + except KeyError: + strace_lock.acquire() + try: + cache[p] = node = bld.root.find_node(p) + if not node: + continue + finally: + strace_lock.release() + nodes.append(node) + + # record the dependencies then force the task signature recalculation for next time + if Logs.verbose: + Logs.debug('deps: real scanner for %r returned %r', self, nodes) + bld = self.generator.bld + bld.node_deps[self.uid()] = nodes + bld.raw_deps[self.uid()] = [] + try: + del self.cache_sig + except AttributeError: + pass + self.signature() + diff --git a/third_party/waf/waflib/extras/swig.py b/third_party/waf/waflib/extras/swig.py new file mode 100644 index 0000000..967caeb --- /dev/null +++ b/third_party/waf/waflib/extras/swig.py @@ -0,0 +1,237 @@ +#! /usr/bin/env python +# encoding: UTF-8 +# Petar Forai +# Thomas Nagy 2008-2010 (ita) + +import re +from waflib import Task, Logs +from waflib.TaskGen import extension, feature, after_method +from waflib.Configure import conf +from waflib.Tools import c_preproc + +""" +tasks have to be added dynamically: +- swig interface files may be created at runtime +- the module name may be unknown in advance +""" + +SWIG_EXTS = ['.swig', '.i'] + +re_module = re.compile(r'%module(?:\s*\(.*\))?\s+([^\r\n]+)', re.M) + +re_1 = re.compile(r'^%module.*?\s+([\w]+)\s*?$', re.M) +re_2 = re.compile(r'[#%](?:include|import(?:\(module=".*"\))+|python(?:begin|code)) [<"](.*)[">]', re.M) + +class swig(Task.Task): + color = 'BLUE' + run_str = '${SWIG} ${SWIGFLAGS} ${SWIGPATH_ST:INCPATHS} ${SWIGDEF_ST:DEFINES} ${SRC}' + ext_out = ['.h'] # might produce .h files although it is not mandatory + vars = ['SWIG_VERSION', 'SWIGDEPS'] + + def runnable_status(self): + for t in self.run_after: + if not t.hasrun: + return Task.ASK_LATER + + if not getattr(self, 'init_outputs', None): + self.init_outputs = True + if not getattr(self, 'module', None): + # search the module name + txt = self.inputs[0].read() + m = re_module.search(txt) + if not m: + raise ValueError("could not find the swig module name") + self.module = m.group(1) + + swig_c(self) + + # add the language-specific output files as nodes + # call funs in the dict swig_langs + for x in self.env['SWIGFLAGS']: + # obtain the language + x = x[1:] + try: + fun = swig_langs[x] + except KeyError: + pass + else: + fun(self) + + return super(swig, self).runnable_status() + + def scan(self): + "scan for swig dependencies, climb the .i files" + lst_src = [] + + seen = [] + missing = [] + to_see = [self.inputs[0]] + + while to_see: + node = to_see.pop(0) + if node in seen: + continue + seen.append(node) + lst_src.append(node) + + # read the file + code = node.read() + code = c_preproc.re_nl.sub('', code) + code = c_preproc.re_cpp.sub(c_preproc.repl, code) + + # find .i files and project headers + names = re_2.findall(code) + for n in names: + for d in self.generator.includes_nodes + [node.parent]: + u = d.find_resource(n) + if u: + to_see.append(u) + break + else: + missing.append(n) + return (lst_src, missing) + +# provide additional language processing +swig_langs = {} +def swigf(fun): + swig_langs[fun.__name__.replace('swig_', '')] = fun + 
return fun +swig.swigf = swigf + +def swig_c(self): + ext = '.swigwrap_%d.c' % self.generator.idx + flags = self.env['SWIGFLAGS'] + if '-c++' in flags: + ext += 'xx' + out_node = self.inputs[0].parent.find_or_declare(self.module + ext) + + if '-c++' in flags: + c_tsk = self.generator.cxx_hook(out_node) + else: + c_tsk = self.generator.c_hook(out_node) + + c_tsk.set_run_after(self) + + # transfer weights from swig task to c task + if getattr(self, 'weight', None): + c_tsk.weight = self.weight + if getattr(self, 'tree_weight', None): + c_tsk.tree_weight = self.tree_weight + + try: + self.more_tasks.append(c_tsk) + except AttributeError: + self.more_tasks = [c_tsk] + + try: + ltask = self.generator.link_task + except AttributeError: + pass + else: + ltask.set_run_after(c_tsk) + # setting input nodes does not declare the build order + # because the build already started, but it sets + # the dependency to enable rebuilds + ltask.inputs.append(c_tsk.outputs[0]) + + self.outputs.append(out_node) + + if not '-o' in self.env['SWIGFLAGS']: + self.env.append_value('SWIGFLAGS', ['-o', self.outputs[0].abspath()]) + +@swigf +def swig_python(tsk): + node = tsk.inputs[0].parent + if tsk.outdir: + node = tsk.outdir + tsk.set_outputs(node.find_or_declare(tsk.module+'.py')) + +@swigf +def swig_ocaml(tsk): + node = tsk.inputs[0].parent + if tsk.outdir: + node = tsk.outdir + tsk.set_outputs(node.find_or_declare(tsk.module+'.ml')) + tsk.set_outputs(node.find_or_declare(tsk.module+'.mli')) + +@extension(*SWIG_EXTS) +def i_file(self, node): + # the task instance + tsk = self.create_task('swig') + tsk.set_inputs(node) + tsk.module = getattr(self, 'swig_module', None) + + flags = self.to_list(getattr(self, 'swig_flags', [])) + tsk.env.append_value('SWIGFLAGS', flags) + + tsk.outdir = None + if '-outdir' in flags: + outdir = flags[flags.index('-outdir')+1] + outdir = tsk.generator.bld.bldnode.make_node(outdir) + outdir.mkdir() + tsk.outdir = outdir + +@feature('c', 'cxx', 'd', 'fc', 'asm') +@after_method('apply_link', 'process_source') +def enforce_swig_before_link(self): + try: + link_task = self.link_task + except AttributeError: + pass + else: + for x in self.tasks: + if x.__class__.__name__ == 'swig': + link_task.run_after.add(x) + +@conf +def check_swig_version(conf, minver=None): + """ + Check if the swig tool is found matching a given minimum version. + minver should be a tuple, eg. to check for swig >= 1.3.28 pass (1,3,28) as minver. + + If successful, SWIG_VERSION is defined as 'MAJOR.MINOR' + (eg. '1.3') of the actual swig version found. 
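+
+	Example (a sketch)::
+
+		def configure(conf):
+			conf.load('swig')
+			conf.check_swig_version((1, 3, 28))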
+ + :param minver: minimum version + :type minver: tuple of int + :return: swig version + :rtype: tuple of int + """ + assert minver is None or isinstance(minver, tuple) + swigbin = conf.env['SWIG'] + if not swigbin: + conf.fatal('could not find the swig executable') + + # Get swig version string + cmd = swigbin + ['-version'] + Logs.debug('swig: Running swig command %r', cmd) + reg_swig = re.compile(r'SWIG Version\s(.*)', re.M) + swig_out = conf.cmd_and_log(cmd) + swigver_tuple = tuple([int(s) for s in reg_swig.findall(swig_out)[0].split('.')]) + + # Compare swig version with the minimum required + result = (minver is None) or (swigver_tuple >= minver) + + if result: + # Define useful environment variables + swigver = '.'.join([str(x) for x in swigver_tuple[:2]]) + conf.env['SWIG_VERSION'] = swigver + + # Feedback + swigver_full = '.'.join(map(str, swigver_tuple[:3])) + if minver is None: + conf.msg('Checking for swig version', swigver_full) + else: + minver_str = '.'.join(map(str, minver)) + conf.msg('Checking for swig version >= %s' % (minver_str,), swigver_full, color=result and 'GREEN' or 'YELLOW') + + if not result: + conf.fatal('The swig version is too old, expecting %r' % (minver,)) + + return swigver_tuple + +def configure(conf): + conf.find_program('swig', var='SWIG') + conf.env.SWIGPATH_ST = '-I%s' + conf.env.SWIGDEF_ST = '-D%s' + diff --git a/third_party/waf/waflib/extras/syms.py b/third_party/waf/waflib/extras/syms.py new file mode 100644 index 0000000..562f708 --- /dev/null +++ b/third_party/waf/waflib/extras/syms.py @@ -0,0 +1,84 @@ +#! /usr/bin/env python +# encoding: utf-8 + +""" +This tool supports the export_symbols_regex to export the symbols in a shared library. +by default, all symbols are exported by gcc, and nothing by msvc. +to use the tool, do something like: + +def build(ctx): + ctx(features='c cshlib syms', source='a.c b.c', export_symbols_regex='mylib_.*', target='testlib') + +only the symbols starting with 'mylib_' will be exported. 
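+
+depending on the target binary format, the collected symbols are written to
+a .def file (PE), a version script (ELF) or an exported-symbols list (Mac-O)
+and handed to the linker.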
+""" + +import re +from waflib.Context import STDOUT +from waflib.Task import Task +from waflib.Errors import WafError +from waflib.TaskGen import feature, after_method + +class gen_sym(Task): + def run(self): + obj = self.inputs[0] + kw = {} + + reg = getattr(self.generator, 'export_symbols_regex', '.+?') + if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME): + re_nm = re.compile(r'External\s+\|\s+_(?P<symbol>%s)\b' % reg) + cmd = (self.env.DUMPBIN or ['dumpbin']) + ['/symbols', obj.abspath()] + else: + if self.env.DEST_BINFMT == 'pe': #gcc uses nm, and has a preceding _ on windows + re_nm = re.compile(r'(T|D)\s+_(?P<symbol>%s)\b' % reg) + elif self.env.DEST_BINFMT=='mac-o': + re_nm=re.compile(r'(T|D)\s+(?P<symbol>_?(%s))\b' % reg) + else: + re_nm = re.compile(r'(T|D)\s+(?P<symbol>%s)\b' % reg) + cmd = (self.env.NM or ['nm']) + ['-g', obj.abspath()] + syms = [m.group('symbol') for m in re_nm.finditer(self.generator.bld.cmd_and_log(cmd, quiet=STDOUT, **kw))] + self.outputs[0].write('%r' % syms) + +class compile_sym(Task): + def run(self): + syms = {} + for x in self.inputs: + slist = eval(x.read()) + for s in slist: + syms[s] = 1 + lsyms = list(syms.keys()) + lsyms.sort() + if self.env.DEST_BINFMT == 'pe': + self.outputs[0].write('EXPORTS\n' + '\n'.join(lsyms)) + elif self.env.DEST_BINFMT == 'elf': + self.outputs[0].write('{ global:\n' + ';\n'.join(lsyms) + ";\nlocal: *; };\n") + elif self.env.DEST_BINFMT=='mac-o': + self.outputs[0].write('\n'.join(lsyms) + '\n') + else: + raise WafError('NotImplemented') + +@feature('syms') +@after_method('process_source', 'process_use', 'apply_link', 'process_uselib_local', 'propagate_uselib_vars') +def do_the_symbol_stuff(self): + def_node = self.path.find_or_declare(getattr(self, 'sym_file', self.target + '.def')) + compiled_tasks = getattr(self, 'compiled_tasks', None) + if compiled_tasks: + ins = [x.outputs[0] for x in compiled_tasks] + self.gen_sym_tasks = [self.create_task('gen_sym', x, x.change_ext('.%d.sym' % self.idx)) for x in ins] + self.create_task('compile_sym', [x.outputs[0] for x in self.gen_sym_tasks], def_node) + + link_task = getattr(self, 'link_task', None) + if link_task: + self.link_task.dep_nodes.append(def_node) + + if 'msvc' in (self.env.CC_NAME, self.env.CXX_NAME): + self.link_task.env.append_value('LINKFLAGS', ['/def:' + def_node.bldpath()]) + elif self.env.DEST_BINFMT == 'pe': + # gcc on windows takes *.def as an additional input + self.link_task.inputs.append(def_node) + elif self.env.DEST_BINFMT == 'elf': + self.link_task.env.append_value('LINKFLAGS', ['-Wl,-version-script', '-Wl,' + def_node.bldpath()]) + elif self.env.DEST_BINFMT=='mac-o': + self.link_task.env.append_value('LINKFLAGS',['-Wl,-exported_symbols_list,' + def_node.bldpath()]) + else: + raise WafError('NotImplemented') + diff --git a/third_party/waf/waflib/extras/ticgt.py b/third_party/waf/waflib/extras/ticgt.py new file mode 100644 index 0000000..f43a7ea --- /dev/null +++ b/third_party/waf/waflib/extras/ticgt.py @@ -0,0 +1,300 @@ +#!/usr/bin/env python +# encoding: utf-8 + +# Texas Instruments code generator support (experimental) +# When reporting issues, please directly assign the bug to the maintainer. + +__author__ = __maintainer__ = "Jérôme Carretero <cJ-waf@zougloub.eu>" +__copyright__ = "Jérôme Carretero, 2012" + +""" +TI cgt6x is a compiler suite for TI DSPs. + +The toolchain does pretty weird things, and I'm sure I'm missing some of them. +But still, the tool saves time. 
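+
+Paths to the cgt, biosutils, dspbios, dsplink and xdctools trees can be
+given with the corresponding --with-ti-* options at configure time.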
+ +What this tool does is: + +- create a TI compiler environment +- create TI compiler features, to handle some specifics about this compiler + It has a few idiosyncracies, such as not giving the liberty of the .o file names +- automatically activate them when using the TI compiler +- handle the tconf tool + The tool + +TODO: + +- the set_platform_flags() function is not nice +- more tests +- broaden tool scope, if needed + +""" + +import os, re + +from waflib import Options, Utils, Task, TaskGen +from waflib.Tools import c, ccroot, c_preproc +from waflib.Configure import conf +from waflib.TaskGen import feature, before_method +from waflib.Tools.c import cprogram + +opj = os.path.join + +@conf +def find_ticc(conf): + conf.find_program(['cl6x'], var='CC', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) + conf.env.CC_NAME = 'ticc' + +@conf +def find_tild(conf): + conf.find_program(['lnk6x'], var='LINK_CC', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) + conf.env.LINK_CC_NAME = 'tild' + +@conf +def find_tiar(conf): + conf.find_program(['ar6x'], var='AR', path_list=opj(getattr(Options.options, 'ti-cgt-dir', ""), 'bin')) + conf.env.AR_NAME = 'tiar' + conf.env.ARFLAGS = 'qru' + +@conf +def ticc_common_flags(conf): + v = conf.env + + if not v['LINK_CC']: + v['LINK_CC'] = v['CC'] + v['CCLNK_SRC_F'] = [] + v['CCLNK_TGT_F'] = ['-o'] + v['CPPPATH_ST'] = '-I%s' + v['DEFINES_ST'] = '-d%s' + + v['LIB_ST'] = '-l%s' # template for adding libs + v['LIBPATH_ST'] = '-i%s' # template for adding libpaths + v['STLIB_ST'] = '-l=%s.lib' + v['STLIBPATH_ST'] = '-i%s' + + # program + v['cprogram_PATTERN'] = '%s.out' + + # static lib + #v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic'] + v['cstlib_PATTERN'] = '%s.lib' + +def configure(conf): + v = conf.env + v.TI_CGT_DIR = getattr(Options.options, 'ti-cgt-dir', "") + v.TI_DSPLINK_DIR = getattr(Options.options, 'ti-dsplink-dir', "") + v.TI_BIOSUTILS_DIR = getattr(Options.options, 'ti-biosutils-dir', "") + v.TI_DSPBIOS_DIR = getattr(Options.options, 'ti-dspbios-dir', "") + v.TI_XDCTOOLS_DIR = getattr(Options.options, 'ti-xdctools-dir', "") + conf.find_ticc() + conf.find_tiar() + conf.find_tild() + conf.ticc_common_flags() + conf.cc_load_tools() + conf.cc_add_flags() + conf.link_add_flags() + conf.find_program(['tconf'], var='TCONF', path_list=v.TI_XDCTOOLS_DIR) + + conf.env.TCONF_INCLUDES += [ + opj(conf.env.TI_DSPBIOS_DIR, 'packages'), + ] + + conf.env.INCLUDES += [ + opj(conf.env.TI_CGT_DIR, 'include'), + ] + + conf.env.LIBPATH += [ + opj(conf.env.TI_CGT_DIR, "lib"), + ] + + conf.env.INCLUDES_DSPBIOS += [ + opj(conf.env.TI_DSPBIOS_DIR, 'packages', 'ti', 'bios', 'include'), + ] + + conf.env.LIBPATH_DSPBIOS += [ + opj(conf.env.TI_DSPBIOS_DIR, 'packages', 'ti', 'bios', 'lib'), + ] + + conf.env.INCLUDES_DSPLINK += [ + opj(conf.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc'), + ] + +@conf +def ti_set_debug(cfg, debug=1): + """ + Sets debug flags for the compiler. + + TODO: + - for each TI CFLAG/INCLUDES/LINKFLAGS/LIBPATH replace RELEASE by DEBUG + - -g --no_compress + """ + if debug: + cfg.env.CFLAGS += "-d_DEBUG -dDEBUG -dDDSP_DEBUG".split() + +@conf +def ti_dsplink_set_platform_flags(cfg, splat, dsp, dspbios_ver, board): + """ + Sets the INCLUDES, LINKFLAGS for DSPLINK and TCONF_INCLUDES + For the specific hardware. + + Assumes that DSPLINK was built in its own folder. + + :param splat: short platform name (eg. OMAPL138) + :param dsp: DSP name (eg. 674X) + :param dspbios_ver: string identifying DspBios version (eg. 
5.XX) + :param board: board name (eg. OMAPL138GEM) + + """ + d1 = opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', 'DspBios', dspbios_ver) + d = opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', 'DspBios', dspbios_ver, board) + cfg.env.TCONF_INCLUDES += [d1, d] + cfg.env.INCLUDES_DSPLINK += [ + opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'inc', dsp), + d, + ] + + cfg.env.LINKFLAGS_DSPLINK += [ + opj(cfg.env.TI_DSPLINK_DIR, 'dsplink', 'dsp', 'export', 'BIN', 'DspBios', splat, board+'_0', 'RELEASE', 'dsplink%s.lib' % x) + for x in ('', 'pool', 'mpcs', 'mplist', 'msg', 'data', 'notify', 'ringio') + ] + + +def options(opt): + opt.add_option('--with-ti-cgt', type='string', dest='ti-cgt-dir', help = 'Specify alternate cgt root folder', default="") + opt.add_option('--with-ti-biosutils', type='string', dest='ti-biosutils-dir', help = 'Specify alternate biosutils folder', default="") + opt.add_option('--with-ti-dspbios', type='string', dest='ti-dspbios-dir', help = 'Specify alternate dspbios folder', default="") + opt.add_option('--with-ti-dsplink', type='string', dest='ti-dsplink-dir', help = 'Specify alternate dsplink folder', default="") + opt.add_option('--with-ti-xdctools', type='string', dest='ti-xdctools-dir', help = 'Specify alternate xdctools folder', default="") + +class ti_cprogram(cprogram): + """ + Link object files into a c program + + Changes: + + - the linked executable to have a relative path (because we can) + - put the LIBPATH first + """ + run_str = '${LINK_CC} ${LIBPATH_ST:LIBPATH} ${LIB_ST:LIB} ${LINKFLAGS} ${CCLNK_SRC_F}${SRC} ${CCLNK_TGT_F}${TGT[0].bldpath()} ${RPATH_ST:RPATH} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${FRAMEWORK_ST:FRAMEWORK} ${ARCH_ST:ARCH} ${STLIB_MARKER} ${STLIBPATH_ST:STLIBPATH} ${STLIB_ST:STLIB} ${SHLIB_MARKER} ' + +@feature("c") +@before_method('apply_link') +def use_ti_cprogram(self): + """ + Automatically uses ti_cprogram link process + """ + if 'cprogram' in self.features and self.env.CC_NAME == 'ticc': + self.features.insert(0, "ti_cprogram") + +class ti_c(Task.Task): + """ + Compile task for the TI codegen compiler + + This compiler does not allow specifying the output file name, only the output path. 
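+
+	(create_compiled_task below works around this by passing the output
+	directory via '-fr' and deriving the .obj name from the source name.)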
+ + """ + "Compile C files into object files" + run_str = '${CC} ${ARCH_ST:ARCH} ${CFLAGS} ${FRAMEWORKPATH_ST:FRAMEWORKPATH} ${CPPPATH_ST:INCPATHS} ${DEFINES_ST:DEFINES} ${SRC} -c ${OUT} ${CPPFLAGS}' + vars = ['CCDEPS'] # unused variable to depend on, just in case + ext_in = ['.h'] # set the build order easily by using ext_out=['.h'] + scan = c_preproc.scan + +def create_compiled_task(self, name, node): + """ + Overrides ccroot.create_compiled_task to support ti_c + """ + out = '%s' % (node.change_ext('.obj').name) + if self.env.CC_NAME == 'ticc': + name = 'ti_c' + task = self.create_task(name, node, node.parent.find_or_declare(out)) + self.env.OUT = '-fr%s' % (node.parent.get_bld().abspath()) + try: + self.compiled_tasks.append(task) + except AttributeError: + self.compiled_tasks = [task] + return task + +@TaskGen.extension('.c') +def c_hook(self, node): + "Bind the c file extension to the creation of a :py:class:`waflib.Tools.c.c` instance" + if self.env.CC_NAME == 'ticc': + return create_compiled_task(self, 'ti_c', node) + else: + return self.create_compiled_task('c', node) + + +@feature("ti-tconf") +@before_method('process_source') +def apply_tconf(self): + sources = [x.get_src() for x in self.to_nodes(self.source, path=self.path.get_src())] + node = sources[0] + assert(sources[0].name.endswith(".tcf")) + if len(sources) > 1: + assert(sources[1].name.endswith(".cmd")) + + target = getattr(self, 'target', self.source) + target_node = node.get_bld().parent.find_or_declare(node.name) + + procid = "%d" % int(getattr(self, 'procid', 0)) + + importpaths = [] + includes = Utils.to_list(getattr(self, 'includes', [])) + for x in includes + self.env.TCONF_INCLUDES: + if x == os.path.abspath(x): + importpaths.append(x) + else: + relpath = self.path.find_node(x).path_from(target_node.parent) + importpaths.append(relpath) + + task = self.create_task('ti_tconf', sources, target_node.change_ext('.cdb')) + task.path = self.path + task.includes = includes + task.cwd = target_node.parent.abspath() + task.env = self.env.derive() + task.env["TCONFSRC"] = node.path_from(target_node.parent) + task.env["TCONFINC"] = '-Dconfig.importPath=%s' % ";".join(importpaths) + task.env['TCONFPROGNAME'] = '-Dconfig.programName=%s' % target + task.env['PROCID'] = procid + task.outputs = [ + target_node.change_ext("cfg_c.c"), + target_node.change_ext("cfg.s62"), + target_node.change_ext("cfg.cmd"), + ] + + create_compiled_task(self, 'ti_c', task.outputs[1]) + ctask = create_compiled_task(self, 'ti_c', task.outputs[0]) + ctask.env = self.env.derive() + + self.add_those_o_files(target_node.change_ext("cfg.cmd")) + if len(sources) > 1: + self.add_those_o_files(sources[1]) + self.source = [] + +re_tconf_include = re.compile(r'(?P<type>utils\.importFile)\("(?P<file>.*)"\)',re.M) +class ti_tconf(Task.Task): + run_str = '${TCONF} ${TCONFINC} ${TCONFPROGNAME} ${TCONFSRC} ${PROCID}' + color = 'PINK' + + def scan(self): + includes = Utils.to_list(getattr(self, 'includes', [])) + + def deps(node): + nodes, names = [], [] + if node: + code = Utils.readf(node.abspath()) + for match in re_tconf_include.finditer(code): + path = match.group('file') + if path: + for x in includes: + filename = opj(x, path) + fi = self.path.find_resource(filename) + if fi: + subnodes, subnames = deps(fi) + nodes += subnodes + names += subnames + nodes.append(fi) + names.append(path) + break + return nodes, names + return deps(self.inputs[0]) + diff --git a/third_party/waf/waflib/extras/unity.py b/third_party/waf/waflib/extras/unity.py new file mode 100644 
index 0000000..78128ed --- /dev/null +++ b/third_party/waf/waflib/extras/unity.py @@ -0,0 +1,108 @@ +#! /usr/bin/env python +# encoding: utf-8 + +""" +Compile whole groups of C/C++ files at once +(C and C++ files are processed independently though). + +To enable globally:: + + def options(opt): + opt.load('compiler_cxx') + def build(bld): + bld.load('compiler_cxx unity') + +To enable for specific task generators only:: + + def build(bld): + bld(features='c cprogram unity', source='main.c', ...) + +The file order is often significant in such builds, so it can be +necessary to adjust the order of source files and the batch sizes. +To control the amount of files processed in a batch per target +(the default is 50):: + + def build(bld): + bld(features='c cprogram', unity_size=20) + +""" + +from waflib import Task, Options +from waflib.Tools import c_preproc +from waflib import TaskGen + +MAX_BATCH = 50 + +EXTS_C = ('.c',) +EXTS_CXX = ('.cpp','.cc','.cxx','.C','.c++') + +def options(opt): + global MAX_BATCH + opt.add_option('--batchsize', action='store', dest='batchsize', type='int', default=MAX_BATCH, + help='default unity batch size (0 disables unity builds)') + +@TaskGen.taskgen_method +def batch_size(self): + default = getattr(Options.options, 'batchsize', MAX_BATCH) + if default < 1: + return 0 + return getattr(self, 'unity_size', default) + + +class unity(Task.Task): + color = 'BLUE' + scan = c_preproc.scan + def to_include(self, node): + ret = node.path_from(self.outputs[0].parent) + ret = ret.replace('\\', '\\\\').replace('"', '\\"') + return ret + def run(self): + lst = ['#include "%s"\n' % self.to_include(node) for node in self.inputs] + txt = ''.join(lst) + self.outputs[0].write(txt) + def __str__(self): + node = self.outputs[0] + return node.path_from(node.ctx.launch_node()) + +def bind_unity(obj, cls_name, exts): + if not 'mappings' in obj.__dict__: + obj.mappings = dict(obj.mappings) + + for j in exts: + fun = obj.mappings[j] + if fun.__name__ == 'unity_fun': + raise ValueError('Attempt to bind unity mappings multiple times %r' % j) + + def unity_fun(self, node): + cnt = self.batch_size() + if cnt <= 1: + return fun(self, node) + x = getattr(self, 'master_%s' % cls_name, None) + if not x or len(x.inputs) >= cnt: + x = self.create_task('unity') + setattr(self, 'master_%s' % cls_name, x) + + cnt_cur = getattr(self, 'cnt_%s' % cls_name, 0) + c_node = node.parent.find_or_declare('unity_%s_%d_%d.%s' % (self.idx, cnt_cur, cnt, cls_name)) + x.outputs = [c_node] + setattr(self, 'cnt_%s' % cls_name, cnt_cur + 1) + fun(self, c_node) + x.inputs.append(node) + + obj.mappings[j] = unity_fun + +@TaskGen.feature('unity') +@TaskGen.before('process_source') +def single_unity(self): + lst = self.to_list(self.features) + if 'c' in lst: + bind_unity(self, 'c', EXTS_C) + if 'cxx' in lst: + bind_unity(self, 'cxx', EXTS_CXX) + +def build(bld): + if bld.env.CC_NAME: + bind_unity(TaskGen.task_gen, 'c', EXTS_C) + if bld.env.CXX_NAME: + bind_unity(TaskGen.task_gen, 'cxx', EXTS_CXX) + diff --git a/third_party/waf/waflib/extras/use_config.py b/third_party/waf/waflib/extras/use_config.py new file mode 100644 index 0000000..ef5129f --- /dev/null +++ b/third_party/waf/waflib/extras/use_config.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python +# coding=utf-8 +# Mathieu Courtois - EDF R&D, 2013 - http://www.code-aster.org + +""" +When a project has a lot of options the 'waf configure' command line can be +very long and it becomes a cause of error. 
+This tool provides a convenient way to load a set of configuration parameters +from a local file or from a remote url. + +The configuration parameters are stored in a Python file that is imported as +an extra waf tool can be. + +Example: +$ waf configure --use-config-dir=http://www.anywhere.org --use-config=myconf1 ... + +The file 'myconf1' will be downloaded from 'http://www.anywhere.org' +(or 'http://www.anywhere.org/wafcfg'). +If the files are available locally, it could be: +$ waf configure --use-config-dir=/somewhere/myconfigurations --use-config=myconf1 ... + +The configuration of 'myconf1.py' is automatically loaded by calling +its 'configure' function. In this example, it defines environment variables and +set options: + +def configure(self): + self.env['CC'] = 'gcc-4.8' + self.env.append_value('LIBPATH', [...]) + self.options.perlbinary = '/usr/local/bin/perl' + self.options.pyc = False + +The corresponding command line should have been: +$ CC=gcc-4.8 LIBPATH=... waf configure --nopyc --with-perl-binary=/usr/local/bin/perl + + +This is an extra tool, not bundled with the default waf binary. +To add the use_config tool to the waf file: +$ ./waf-light --tools=use_config + +When using this tool, the wscript will look like: + + def options(opt): + opt.load('use_config') + + def configure(conf): + conf.load('use_config') +""" + +import sys +import os.path as osp +import os + +local_repo = '' +"""Local repository containing additional Waf tools (plugins)""" +remote_repo = 'https://gitlab.com/ita1024/waf/raw/master/' +""" +Remote directory containing downloadable waf tools. The missing tools can be downloaded by using:: + + $ waf configure --download +""" + +remote_locs = ['waflib/extras', 'waflib/Tools'] +""" +Remote directories for use with :py:const:`waflib.extras.use_config.remote_repo` +""" + + +try: + from urllib import request +except ImportError: + from urllib import urlopen +else: + urlopen = request.urlopen + + +from waflib import Errors, Context, Logs, Utils, Options, Configure + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + + + + +DEFAULT_DIR = 'wafcfg' +# add first the current wafcfg subdirectory +sys.path.append(osp.abspath(DEFAULT_DIR)) + +def options(self): + group = self.add_option_group('configure options') + group.add_option('--download', dest='download', default=False, action='store_true', help='try to download the tools if missing') + + group.add_option('--use-config', action='store', default=None, + metavar='CFG', dest='use_config', + help='force the configuration parameters by importing ' + 'CFG.py. Several modules may be provided (comma ' + 'separated).') + group.add_option('--use-config-dir', action='store', default=DEFAULT_DIR, + metavar='CFG_DIR', dest='use_config_dir', + help='path or url where to find the configuration file') + +def download_check(node): + """ + Hook to check for the tools which are downloaded. Replace with your function if necessary. 
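+
+	A replacement could, for instance, check the downloaded file against a
+	list of known checksums and raise Errors.WafError on a mismatch.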
+ """ + pass + + +def download_tool(tool, force=False, ctx=None): + """ + Download a Waf tool from the remote repository defined in :py:const:`waflib.extras.use_config.remote_repo`:: + + $ waf configure --download + """ + for x in Utils.to_list(remote_repo): + for sub in Utils.to_list(remote_locs): + url = '/'.join((x, sub, tool + '.py')) + try: + web = urlopen(url) + try: + if web.getcode() != 200: + continue + except AttributeError: + pass + except Exception: + # on python3 urlopen throws an exception + # python 2.3 does not have getcode and throws an exception to fail + continue + else: + tmp = ctx.root.make_node(os.sep.join((Context.waf_dir, 'waflib', 'extras', tool + '.py'))) + tmp.write(web.read(), 'wb') + Logs.warn('Downloaded %s from %s', tool, url) + download_check(tmp) + try: + module = Context.load_tool(tool) + except Exception: + Logs.warn('The tool %s from %s is unusable', tool, url) + try: + tmp.delete() + except Exception: + pass + continue + return module + + raise Errors.WafError('Could not load the Waf tool') + +def load_tool(tool, tooldir=None, ctx=None, with_sys_path=True): + try: + module = Context.load_tool_default(tool, tooldir, ctx, with_sys_path) + except ImportError as e: + if not ctx or not hasattr(Options.options, 'download'): + Logs.error('Could not load %r during options phase (download unavailable at this point)' % tool) + raise + if Options.options.download: + module = download_tool(tool, ctx=ctx) + if not module: + ctx.fatal('Could not load the Waf tool %r or download a suitable replacement from the repository (sys.path %r)\n%s' % (tool, sys.path, e)) + else: + ctx.fatal('Could not load the Waf tool %r from %r (try the --download option?):\n%s' % (tool, sys.path, e)) + return module + +Context.load_tool_default = Context.load_tool +Context.load_tool = load_tool +Configure.download_tool = download_tool + +def configure(self): + opts = self.options + use_cfg = opts.use_config + if use_cfg is None: + return + url = urlparse(opts.use_config_dir) + kwargs = {} + if url.scheme: + kwargs['download'] = True + kwargs['remote_url'] = url.geturl() + # search first with the exact url, else try with +'/wafcfg' + kwargs['remote_locs'] = ['', DEFAULT_DIR] + tooldir = url.geturl() + ' ' + DEFAULT_DIR + for cfg in use_cfg.split(','): + Logs.pprint('NORMAL', "Searching configuration '%s'..." % cfg) + self.load(cfg, tooldir=tooldir, **kwargs) + self.start_msg('Checking for configuration') + self.end_msg(use_cfg) + diff --git a/third_party/waf/waflib/extras/valadoc.py b/third_party/waf/waflib/extras/valadoc.py new file mode 100644 index 0000000..c50f69e --- /dev/null +++ b/third_party/waf/waflib/extras/valadoc.py @@ -0,0 +1,140 @@ +#! /usr/bin/env python +# encoding: UTF-8 +# Nicolas Joseph 2009 + +""" +ported from waf 1.5: +TODO: tabs vs spaces +""" + +from waflib import Task, Utils, Errors, Logs +from waflib.TaskGen import feature + +VALADOC_STR = '${VALADOC}' + +class valadoc(Task.Task): + vars = ['VALADOC', 'VALADOCFLAGS'] + color = 'BLUE' + after = ['cprogram', 'cstlib', 'cshlib', 'cxxprogram', 'cxxstlib', 'cxxshlib'] + quiet = True # no outputs .. 
this is weird
+
+	def __init__(self, *k, **kw):
+		Task.Task.__init__(self, *k, **kw)
+		self.output_dir = ''
+		self.doclet = ''
+		self.package_name = ''
+		self.package_version = ''
+		self.files = []
+		self.vapi_dirs = []
+		self.protected = True
+		self.private = False
+		self.inherit = False
+		self.deps = False
+		self.vala_defines = []
+		self.vala_target_glib = None
+		self.enable_non_null_experimental = False
+		self.force = False
+
+	def run(self):
+		if not self.env['VALADOCFLAGS']:
+			self.env['VALADOCFLAGS'] = ''
+		cmd = [Utils.subst_vars(VALADOC_STR, self.env)]
+		cmd.append('-o %s' % self.output_dir)
+		if getattr(self, 'doclet', None):
+			cmd.append('--doclet %s' % self.doclet)
+		cmd.append('--package-name %s' % self.package_name)
+		if getattr(self, 'package_version', None):
+			cmd.append('--package-version %s' % self.package_version)
+		if getattr(self, 'packages', None):
+			for package in self.packages:
+				cmd.append('--pkg %s' % package)
+		if getattr(self, 'vapi_dirs', None):
+			for vapi_dir in self.vapi_dirs:
+				cmd.append('--vapidir %s' % vapi_dir)
+		if not getattr(self, 'protected', None):
+			cmd.append('--no-protected')
+		if getattr(self, 'private', None):
+			cmd.append('--private')
+		if getattr(self, 'inherit', None):
+			cmd.append('--inherit')
+		if getattr(self, 'deps', None):
+			cmd.append('--deps')
+		if getattr(self, 'vala_defines', None):
+			for define in self.vala_defines:
+				cmd.append('--define %s' % define)
+		if getattr(self, 'vala_target_glib', None):
+			cmd.append('--target-glib=%s' % self.vala_target_glib)
+		if getattr(self, 'enable_non_null_experimental', None):
+			cmd.append('--enable-non-null-experimental')
+		if getattr(self, 'force', None):
+			cmd.append('--force')
+		cmd.append(' '.join([x.abspath() for x in self.files]))
+		return self.generator.bld.exec_command(' '.join(cmd))
+
+@feature('valadoc')
+def process_valadoc(self):
+	"""
+	Generate API documentation from Vala source code with valadoc
+
+	doc = bld(
+		features = 'valadoc',
+		output_dir = '../doc/html',
+		package_name = 'vala-gtk-example',
+		package_version = '1.0.0',
+		packages = 'gtk+-2.0',
+		vapi_dirs = '../vapi',
+		force = True
+	)
+
+	path = bld.path.find_dir('../src')
+	doc.files = path.ant_glob(incl='**/*.vala')
+	"""
+
+	task = self.create_task('valadoc')
+	if getattr(self, 'output_dir', None):
+		task.output_dir = self.path.find_or_declare(self.output_dir).abspath()
+	else:
+		raise Errors.WafError('no output directory')
+	if getattr(self, 'doclet', None):
+		task.doclet = self.doclet
+	else:
+		raise Errors.WafError('no doclet directory')
+	if getattr(self, 'package_name', None):
+		task.package_name = self.package_name
+	else:
+		raise Errors.WafError('no package name')
+	if getattr(self, 'package_version', None):
+		task.package_version = self.package_version
+	if getattr(self, 'packages', None):
+		task.packages = Utils.to_list(self.packages)
+	if getattr(self, 'vapi_dirs', None):
+		vapi_dirs = Utils.to_list(self.vapi_dirs)
+		for vapi_dir in vapi_dirs:
+			try:
+				task.vapi_dirs.append(self.path.find_dir(vapi_dir).abspath())
+			except AttributeError:
+				Logs.warn('Unable to locate Vala API directory: %r', vapi_dir)
+	if getattr(self, 'files', None):
+		task.files = self.files
+	else:
+		raise Errors.WafError('no input file')
+	if getattr(self, 'protected', None):
+		task.protected = self.protected
+	if getattr(self, 'private', None):
+		task.private = self.private
+	if getattr(self, 'inherit', None):
+		task.inherit = self.inherit
+	if getattr(self, 'deps', None):
+		task.deps = self.deps
+	if getattr(self, 'vala_defines', None):
+		task.vala_defines = Utils.to_list(self.vala_defines)
+	if getattr(self, 'vala_target_glib', None):
+		task.vala_target_glib = self.vala_target_glib
+	if getattr(self, 'enable_non_null_experimental', None):
+		task.enable_non_null_experimental = self.enable_non_null_experimental
+	if getattr(self, 'force', None):
+		task.force = self.force
+
+def configure(conf):
+	conf.find_program('valadoc', errmsg='You must install valadoc <http://live.gnome.org/Valadoc> to generate the API documentation')
+
diff --git a/third_party/waf/waflib/extras/waf_xattr.py b/third_party/waf/waflib/extras/waf_xattr.py
new file mode 100644
index 0000000..351dd63
--- /dev/null
+++ b/third_party/waf/waflib/extras/waf_xattr.py
@@ -0,0 +1,150 @@
+#! /usr/bin/env python
+# encoding: utf-8
+
+"""
+Use extended attributes instead of database files
+
+1. Input files will be made writable
+2. This is only for systems providing extended filesystem attributes
+3. By default, hashes are calculated only if timestamp/size change (HASH_CACHE below)
+4. The module enables "deep_inputs" on all tasks by propagating task signatures
+5. This module also skips task signature comparisons for task code changes due to point 4.
+6. This module is for Python3/Linux only, but it could be extended to Python2/other systems
+   using the xattr library
+7. For projects in which tasks always declare output files, it should be possible to
+   store the rest of the build context attributes on output files (imp_sigs, raw_deps and node_deps)
+   but this is not done here
+
+On a simple C++ project benchmark, the following variations were observed before
+and after adding waf_xattr.py:
+total build time: 20s -> 22s
+no-op build time: 2.4s -> 1.8s
+pickle file size: 2.9MB -> 2.6MB
+"""
+
+import os
+from waflib import Logs, Node, Task, Utils, Errors
+from waflib.Task import SKIP_ME, RUN_ME, CANCEL_ME, ASK_LATER, SKIPPED, MISSING
+
+HASH_CACHE = True
+SIG_VAR = 'user.waf.sig'
+SEP = ','.encode()
+TEMPLATE = '%b%d,%d'.encode()
+
+try:
+	PermissionError
+except NameError:
+	PermissionError = IOError
+
+def getxattr(self):
+	return os.getxattr(self.abspath(), SIG_VAR)
+
+def setxattr(self, val):
+	os.setxattr(self.abspath(), SIG_VAR, val)
+
+def h_file(self):
+	try:
+		ret = getxattr(self)
+	except OSError:
+		if HASH_CACHE:
+			st = os.stat(self.abspath())
+			mtime = st.st_mtime
+			size = st.st_size
+	else:
+		if len(ret) == 16:
+			# for build directory files
+			return ret
+
+		if HASH_CACHE:
+			# check if timestamp and size match to avoid re-hashing
+			st = os.stat(self.abspath())
+			mtime, size = ret[16:].split(SEP)
+			if int(1000 * st.st_mtime) == int(mtime) and st.st_size == int(size):
+				return ret[:16]
+
+	ret = Utils.h_file(self.abspath())
+	if HASH_CACHE:
+		val = TEMPLATE % (ret, int(1000 * st.st_mtime), int(st.st_size))
+		try:
+			setxattr(self, val)
+		except PermissionError:
+			os.chmod(self.abspath(), st.st_mode | 128)
+			setxattr(self, val)
+	return ret
+
+def runnable_status(self):
+	bld = self.generator.bld
+	if bld.is_install < 0:
+		return SKIP_ME
+
+	for t in self.run_after:
+		if not t.hasrun:
+			return ASK_LATER
+		elif t.hasrun < SKIPPED:
+			# a dependency has an error
+			return CANCEL_ME
+
+	# first compute the signature
+	try:
+		new_sig = self.signature()
+	except Errors.TaskNotReady:
+		return ASK_LATER
+
+	if not self.outputs:
+		# compare the signature to a signature computed previously
+		# this part is only for tasks with no output files
+		key = self.uid()
+		try:
+			prev_sig = bld.task_sigs[key]
+		except KeyError:
+			Logs.debug('task: task %r must run: it was never run before or
the task code changed', self) + return RUN_ME + if new_sig != prev_sig: + Logs.debug('task: task %r must run: the task signature changed', self) + return RUN_ME + + # compare the signatures of the outputs to make a decision + for node in self.outputs: + try: + sig = node.h_file() + except EnvironmentError: + Logs.debug('task: task %r must run: an output node does not exist', self) + return RUN_ME + if sig != new_sig: + Logs.debug('task: task %r must run: an output node is stale', self) + return RUN_ME + + return (self.always_run and RUN_ME) or SKIP_ME + +def post_run(self): + bld = self.generator.bld + sig = self.signature() + for node in self.outputs: + if not node.exists(): + self.hasrun = MISSING + self.err_msg = '-> missing file: %r' % node.abspath() + raise Errors.WafError(self.err_msg) + os.setxattr(node.abspath(), 'user.waf.sig', sig) + if not self.outputs: + # only for task with no outputs + bld.task_sigs[self.uid()] = sig + if not self.keep_last_cmd: + try: + del self.last_cmd + except AttributeError: + pass + +try: + os.getxattr +except AttributeError: + pass +else: + h_file.__doc__ = Node.Node.h_file.__doc__ + + # keep file hashes as file attributes + Node.Node.h_file = h_file + + # enable "deep_inputs" on all tasks + Task.Task.runnable_status = runnable_status + Task.Task.post_run = post_run + Task.Task.sig_deep_inputs = Utils.nada + diff --git a/third_party/waf/waflib/extras/wafcache.py b/third_party/waf/waflib/extras/wafcache.py new file mode 100644 index 0000000..30ac3ef --- /dev/null +++ b/third_party/waf/waflib/extras/wafcache.py @@ -0,0 +1,648 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2019 (ita) + +""" +Filesystem-based cache system to share and re-use build artifacts + +Cache access operations (copy to and from) are delegated to +independent pre-forked worker subprocesses. + +The following environment variables may be set: +* WAFCACHE: several possibilities: + - File cache: + absolute path of the waf cache (~/.cache/wafcache_user, + where `user` represents the currently logged-in user) + - URL to a cache server, for example: + export WAFCACHE=http://localhost:8080/files/ + in that case, GET/POST requests are made to urls of the form + http://localhost:8080/files/000000000/0 (cache management is delegated to the server) + - GCS, S3 or MINIO bucket + gs://my-bucket/ (uses gsutil command line tool or WAFCACHE_CMD) + s3://my-bucket/ (uses aws command line tool or WAFCACHE_CMD) + minio://my-bucket/ (uses mc command line tool or WAFCACHE_CMD) +* WAFCACHE_CMD: bucket upload/download command, for example: + WAFCACHE_CMD="gsutil cp %{SRC} %{TGT}" + Note that the WAFCACHE bucket value is used for the source or destination + depending on the operation (upload or download). For example, with: + WAFCACHE="gs://mybucket/" + the following commands may be run: + gsutil cp build/myprogram gs://mybucket/aa/aaaaa/1 + gsutil cp gs://mybucket/bb/bbbbb/2 build/somefile +* WAFCACHE_NO_PUSH: if set, disables pushing to the cache +* WAFCACHE_VERBOSITY: if set, displays more detailed cache operations +* WAFCACHE_STATS: if set, displays cache usage statistics on exit + +File cache specific options: + Files are copied using hard links by default; if the cache is located + onto another partition, the system switches to file copies instead. 
+* WAFCACHE_TRIM_MAX_FOLDER: maximum number of tasks to cache (1M)
+* WAFCACHE_EVICT_MAX_BYTES: maximum cache size in bytes (10GB)
+* WAFCACHE_EVICT_INTERVAL_MINUTES: minimum time interval to try
+  and trim the cache (3 minutes)
+
+Upload specific options:
+* WAFCACHE_ASYNC_WORKERS: define a number of workers to upload results asynchronously
+  this may improve build performance with many/long file uploads
+  the default is unset (synchronous uploads)
+* WAFCACHE_ASYNC_NOWAIT: do not wait for uploads to complete (default: False)
+  this requires asynchronous uploads to have an effect
+
+Usage::
+
+	def build(bld):
+		bld.load('wafcache')
+		...
+
+To troubleshoot::
+
+	waf clean build --zone=wafcache
+"""
+
+import atexit, base64, errno, fcntl, getpass, os, re, shutil, sys, time, threading, traceback, urllib3, shlex
+try:
+	import subprocess32 as subprocess
+except ImportError:
+	import subprocess
+
+base_cache = os.path.expanduser('~/.cache/')
+if not os.path.isdir(base_cache):
+	base_cache = '/tmp/'
+default_wafcache_dir = os.path.join(base_cache, 'wafcache_' + getpass.getuser())
+
+CACHE_DIR = os.environ.get('WAFCACHE', default_wafcache_dir)
+WAFCACHE_CMD = os.environ.get('WAFCACHE_CMD')
+TRIM_MAX_FOLDERS = int(os.environ.get('WAFCACHE_TRIM_MAX_FOLDER', 1000000))
+EVICT_INTERVAL_MINUTES = int(os.environ.get('WAFCACHE_EVICT_INTERVAL_MINUTES', 3))
+EVICT_MAX_BYTES = int(os.environ.get('WAFCACHE_EVICT_MAX_BYTES', 10**10))
+WAFCACHE_NO_PUSH = 1 if os.environ.get('WAFCACHE_NO_PUSH') else 0
+WAFCACHE_VERBOSITY = 1 if os.environ.get('WAFCACHE_VERBOSITY') else 0
+WAFCACHE_STATS = 1 if os.environ.get('WAFCACHE_STATS') else 0
+WAFCACHE_ASYNC_WORKERS = os.environ.get('WAFCACHE_ASYNC_WORKERS')
+WAFCACHE_ASYNC_NOWAIT = os.environ.get('WAFCACHE_ASYNC_NOWAIT')
+OK = "ok"
+
+re_waf_cmd = re.compile('(?P<src>%{SRC})|(?P<tgt>%{TGT})')
+
+try:
+	import cPickle
+except ImportError:
+	import pickle as cPickle
+
+if __name__ != '__main__':
+	from waflib import Task, Logs, Utils, Build
+
+def can_retrieve_cache(self):
+	"""
+	New method for waf Task classes
+	"""
+	if not self.outputs:
+		return False
+
+	self.cached = False
+
+	sig = self.signature()
+	ssig = Utils.to_hex(self.uid() + sig)
+
+	if WAFCACHE_STATS:
+		self.generator.bld.cache_reqs += 1
+
+	files_to = [node.abspath() for node in self.outputs]
+	proc = get_process()
+	err = cache_command(proc, ssig, [], files_to)
+	process_pool.append(proc)
+	if err.startswith(OK):
+		if WAFCACHE_VERBOSITY:
+			Logs.pprint('CYAN', '  Fetched %r from cache' % files_to)
+		else:
+			Logs.debug('wafcache: fetched %r from cache', files_to)
+		if WAFCACHE_STATS:
+			self.generator.bld.cache_hits += 1
+	else:
+		if WAFCACHE_VERBOSITY:
+			Logs.pprint('YELLOW', '  No cache entry %s' % files_to)
+		else:
+			Logs.debug('wafcache: No cache entry %s: %s', files_to, err)
+		return False
+
+	self.cached = True
+	return True
+
+def put_files_cache(self):
+	"""
+	New method for waf Task classes
+	"""
+	if WAFCACHE_NO_PUSH or getattr(self, 'cached', None) or not self.outputs:
+		return
+
+	files_from = []
+	for node in self.outputs:
+		path = node.abspath()
+		if not os.path.isfile(path):
+			return
+		files_from.append(path)
+
+	bld = self.generator.bld
+	old_sig = self.signature()
+
+	for node in self.inputs:
+		try:
+			del node.ctx.cache_sig[node]
+		except KeyError:
+			pass
+
+	delattr(self, 'cache_sig')
+	sig = self.signature()
+
+	def _async_put_files_cache(bld, ssig, files_from):
+		proc = get_process()
+		if WAFCACHE_ASYNC_WORKERS:
+			with bld.wafcache_lock:
+				if bld.wafcache_stop:
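+					# a shutdown was requested: return this worker to the pool and skip the upload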
process_pool.append(proc) + return + bld.wafcache_procs.add(proc) + + err = cache_command(proc, ssig, files_from, []) + process_pool.append(proc) + if err.startswith(OK): + if WAFCACHE_VERBOSITY: + Logs.pprint('CYAN', ' Successfully uploaded %s to cache' % files_from) + else: + Logs.debug('wafcache: Successfully uploaded %r to cache', files_from) + if WAFCACHE_STATS: + bld.cache_puts += 1 + else: + if WAFCACHE_VERBOSITY: + Logs.pprint('RED', ' Error caching step results %s: %s' % (files_from, err)) + else: + Logs.debug('wafcache: Error caching results %s: %s', files_from, err) + + if old_sig == sig: + ssig = Utils.to_hex(self.uid() + sig) + if WAFCACHE_ASYNC_WORKERS: + fut = bld.wafcache_executor.submit(_async_put_files_cache, bld, ssig, files_from) + bld.wafcache_uploads.append(fut) + else: + _async_put_files_cache(bld, ssig, files_from) + else: + Logs.debug('wafcache: skipped %r upload due to late input modifications %r', self.outputs, self.inputs) + + bld.task_sigs[self.uid()] = self.cache_sig + +def hash_env_vars(self, env, vars_lst): + """ + Reimplement BuildContext.hash_env_vars so that the resulting hash does not depend on local paths + """ + if not env.table: + env = env.parent + if not env: + return Utils.SIG_NIL + + idx = str(id(env)) + str(vars_lst) + try: + cache = self.cache_env + except AttributeError: + cache = self.cache_env = {} + else: + try: + return self.cache_env[idx] + except KeyError: + pass + + v = str([env[a] for a in vars_lst]) + v = v.replace(self.srcnode.abspath().__repr__()[:-1], '') + m = Utils.md5() + m.update(v.encode()) + ret = m.digest() + + Logs.debug('envhash: %r %r', ret, v) + + cache[idx] = ret + + return ret + +def uid(self): + """ + Reimplement Task.uid() so that the signature does not depend on local paths + """ + try: + return self.uid_ + except AttributeError: + m = Utils.md5() + src = self.generator.bld.srcnode + up = m.update + up(self.__class__.__name__.encode()) + for x in self.inputs + self.outputs: + up(x.path_from(src).encode()) + self.uid_ = m.digest() + return self.uid_ + + +def make_cached(cls): + """ + Enable the waf cache for a given task class + """ + if getattr(cls, 'nocache', None) or getattr(cls, 'has_cache', False): + return + + full_name = "%s.%s" % (cls.__module__, cls.__name__) + if full_name in ('waflib.Tools.ccroot.vnum', 'waflib.Build.inst'): + return + + m1 = getattr(cls, 'run', None) + def run(self): + if getattr(self, 'nocache', False): + return m1(self) + if self.can_retrieve_cache(): + return 0 + return m1(self) + cls.run = run + + m2 = getattr(cls, 'post_run', None) + def post_run(self): + if getattr(self, 'nocache', False): + return m2(self) + ret = m2(self) + self.put_files_cache() + return ret + cls.post_run = post_run + cls.has_cache = True + +process_pool = [] +def get_process(): + """ + Returns a worker process that can process waf cache commands + The worker process is assumed to be returned to the process pool when unused + """ + try: + return process_pool.pop() + except IndexError: + filepath = os.path.dirname(os.path.abspath(__file__)) + os.sep + 'wafcache.py' + cmd = [sys.executable, '-c', Utils.readf(filepath)] + return subprocess.Popen(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE, bufsize=0) + +def atexit_pool(): + for proc in process_pool: + proc.kill() +atexit.register(atexit_pool) + +def build(bld): + """ + Called during the build process to enable file caching + """ + + if WAFCACHE_ASYNC_WORKERS: + try: + num_workers = int(WAFCACHE_ASYNC_WORKERS) + except ValueError: + Logs.warn('Invalid 
WAFCACHE_ASYNC_WORKERS specified: %r' % WAFCACHE_ASYNC_WORKERS) + else: + from concurrent.futures import ThreadPoolExecutor + bld.wafcache_executor = ThreadPoolExecutor(max_workers=num_workers) + bld.wafcache_uploads = [] + bld.wafcache_procs = set([]) + bld.wafcache_stop = False + bld.wafcache_lock = threading.Lock() + + def finalize_upload_async(bld): + if WAFCACHE_ASYNC_NOWAIT: + with bld.wafcache_lock: + bld.wafcache_stop = True + + for fut in reversed(bld.wafcache_uploads): + fut.cancel() + + for proc in bld.wafcache_procs: + proc.kill() + + bld.wafcache_procs.clear() + else: + Logs.pprint('CYAN', '... waiting for wafcache uploads to complete (%s uploads)' % len(bld.wafcache_uploads)) + bld.wafcache_executor.shutdown(wait=True) + bld.add_post_fun(finalize_upload_async) + + if WAFCACHE_STATS: + # Init counter for statistics and hook to print results at the end + bld.cache_reqs = bld.cache_hits = bld.cache_puts = 0 + + def printstats(bld): + hit_ratio = 0 + if bld.cache_reqs > 0: + hit_ratio = (bld.cache_hits / bld.cache_reqs) * 100 + Logs.pprint('CYAN', ' wafcache stats: %s requests, %s hits (ratio: %.2f%%), %s writes' % + (bld.cache_reqs, bld.cache_hits, hit_ratio, bld.cache_puts) ) + bld.add_post_fun(printstats) + + if process_pool: + # already called once + return + + # pre-allocation + processes = [get_process() for x in range(bld.jobs)] + process_pool.extend(processes) + + Task.Task.can_retrieve_cache = can_retrieve_cache + Task.Task.put_files_cache = put_files_cache + Task.Task.uid = uid + Build.BuildContext.hash_env_vars = hash_env_vars + for x in reversed(list(Task.classes.values())): + make_cached(x) + +def cache_command(proc, sig, files_from, files_to): + """ + Create a command for cache worker processes, returns a pickled + base64-encoded tuple containing the task signature, a list of files to + cache and a list of files files to get from cache (one of the lists + is assumed to be empty) + """ + obj = base64.b64encode(cPickle.dumps([sig, files_from, files_to])) + proc.stdin.write(obj) + proc.stdin.write('\n'.encode()) + proc.stdin.flush() + obj = proc.stdout.readline() + if not obj: + raise OSError('Preforked sub-process %r died' % proc.pid) + return cPickle.loads(base64.b64decode(obj)) + +try: + copyfun = os.link +except NameError: + copyfun = shutil.copy2 + +def atomic_copy(orig, dest): + """ + Copy files to the cache, the operation is atomic for a given file + """ + global copyfun + tmp = dest + '.tmp' + up = os.path.dirname(dest) + try: + os.makedirs(up) + except OSError: + pass + + try: + copyfun(orig, tmp) + except OSError as e: + if e.errno == errno.EXDEV: + copyfun = shutil.copy2 + copyfun(orig, tmp) + else: + raise + os.rename(tmp, dest) + +def lru_trim(): + """ + the cache folders take the form: + `CACHE_DIR/0b/0b180f82246d726ece37c8ccd0fb1cde2650d7bfcf122ec1f169079a3bfc0ab9` + they are listed in order of last access, and then removed + until the amount of folders is within TRIM_MAX_FOLDERS and the total space + taken by files is less than EVICT_MAX_BYTES + """ + lst = [] + for up in os.listdir(CACHE_DIR): + if len(up) == 2: + sub = os.path.join(CACHE_DIR, up) + for hval in os.listdir(sub): + path = os.path.join(sub, hval) + + size = 0 + for fname in os.listdir(path): + try: + size += os.lstat(os.path.join(path, fname)).st_size + except OSError: + pass + lst.append((os.stat(path).st_mtime, size, path)) + + lst.sort(key=lambda x: x[0]) + lst.reverse() + + tot = sum(x[1] for x in lst) + while tot > EVICT_MAX_BYTES or len(lst) > TRIM_MAX_FOLDERS: + _, tmp_size, path = 
lst.pop()
+		tot -= tmp_size
+
+		tmp = path + '.remove'
+		try:
+			shutil.rmtree(tmp)
+		except OSError:
+			pass
+		try:
+			os.rename(path, tmp)
+		except OSError:
+			sys.stderr.write('Could not rename %r to %r\n' % (path, tmp))
+		else:
+			try:
+				shutil.rmtree(tmp)
+			except OSError:
+				sys.stderr.write('Could not remove %r\n' % tmp)
+	sys.stderr.write("Cache trimmed: %r bytes in %r folders left\n" % (tot, len(lst)))
+
+
+def lru_evict():
+	"""
+	Reduce the cache size
+	"""
+	lockfile = os.path.join(CACHE_DIR, 'all.lock')
+	try:
+		st = os.stat(lockfile)
+	except EnvironmentError as e:
+		if e.errno == errno.ENOENT:
+			with open(lockfile, 'w') as f:
+				f.write('')
+			return
+		else:
+			raise
+
+	if st.st_mtime < time.time() - EVICT_INTERVAL_MINUTES * 60:
+		# check every EVICT_INTERVAL_MINUTES minutes if the cache is too big
+		# O_CLOEXEC is unnecessary because no processes are spawned
+		fd = os.open(lockfile, os.O_RDWR | os.O_CREAT, 0o755)
+		try:
+			try:
+				fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+			except EnvironmentError:
+				if WAFCACHE_VERBOSITY:
+					sys.stderr.write('wafcache: another cleaning process is running\n')
+			else:
+				# now do the actual cleanup
+				lru_trim()
+				os.utime(lockfile, None)
+		finally:
+			os.close(fd)
+
+class netcache(object):
+	def __init__(self):
+		self.http = urllib3.PoolManager()
+
+	def url_of(self, sig, i):
+		return "%s/%s/%s" % (CACHE_DIR, sig, i)
+
+	def upload(self, file_path, sig, i):
+		url = self.url_of(sig, i)
+		with open(file_path, 'rb') as f:
+			file_data = f.read()
+		r = self.http.request('POST', url, timeout=60,
+			fields={ 'file': ('%s/%s' % (sig, i), file_data), })
+		if r.status >= 400:
+			raise OSError("Invalid status %r %r" % (url, r.status))
+
+	def download(self, file_path, sig, i):
+		url = self.url_of(sig, i)
+		with self.http.request('GET', url, preload_content=False, timeout=60) as inf:
+			if inf.status >= 400:
+				raise OSError("Invalid status %r %r" % (url, inf.status))
+			with open(file_path, 'wb') as out:
+				shutil.copyfileobj(inf, out)
+
+	def copy_to_cache(self, sig, files_from, files_to):
+		try:
+			for i, x in enumerate(files_from):
+				if not os.path.islink(x):
+					self.upload(x, sig, i)
+		except Exception:
+			return traceback.format_exc()
+		return OK
+
+	def copy_from_cache(self, sig, files_from, files_to):
+		try:
+			for i, x in enumerate(files_to):
+				self.download(x, sig, i)
+		except Exception:
+			return traceback.format_exc()
+		return OK
+
+class fcache(object):
+	def __init__(self):
+		if not os.path.exists(CACHE_DIR):
+			try:
+				os.makedirs(CACHE_DIR)
+			except OSError:
+				pass
+		if not os.path.exists(CACHE_DIR):
+			raise ValueError('Could not initialize the cache directory')
+
+	def copy_to_cache(self, sig, files_from, files_to):
+		"""
+		Copy files to the cache, existing files are overwritten,
+		and the copy is atomic only for a given file, not for all files
+		that belong to a given task object
+		"""
+		try:
+			for i, x in enumerate(files_from):
+				dest = os.path.join(CACHE_DIR, sig[:2], sig, str(i))
+				atomic_copy(x, dest)
+		except Exception:
+			return traceback.format_exc()
+		else:
+			# attempt trimming if caching was successful:
+			# we may have things to trim!
+			try:
+				lru_evict()
+			except Exception:
+				return traceback.format_exc()
+		return OK
+
+	def copy_from_cache(self, sig, files_from, files_to):
+		"""
+		Copy files from the cache
+		"""
+		try:
+			for i, x in enumerate(files_to):
+				orig = os.path.join(CACHE_DIR, sig[:2], sig, str(i))
+				atomic_copy(orig, x)
+
+			# success!
update the cache time + os.utime(os.path.join(CACHE_DIR, sig[:2], sig), None) + except Exception: + return traceback.format_exc() + return OK + +class bucket_cache(object): + def bucket_copy(self, source, target): + if WAFCACHE_CMD: + def replacer(match): + if match.group('src'): + return source + elif match.group('tgt'): + return target + cmd = [re_waf_cmd.sub(replacer, x) for x in shlex.split(WAFCACHE_CMD)] + elif CACHE_DIR.startswith('s3://'): + cmd = ['aws', 's3', 'cp', source, target] + elif CACHE_DIR.startswith('gs://'): + cmd = ['gsutil', 'cp', source, target] + else: + cmd = ['mc', 'cp', source, target] + + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + out, err = proc.communicate() + if proc.returncode: + raise OSError('Error copy %r to %r using: %r (exit %r):\n out:%s\n err:%s' % ( + source, target, cmd, proc.returncode, out.decode(errors='replace'), err.decode(errors='replace'))) + + def copy_to_cache(self, sig, files_from, files_to): + try: + for i, x in enumerate(files_from): + dest = os.path.join(CACHE_DIR, sig[:2], sig, str(i)) + self.bucket_copy(x, dest) + except Exception: + return traceback.format_exc() + return OK + + def copy_from_cache(self, sig, files_from, files_to): + try: + for i, x in enumerate(files_to): + orig = os.path.join(CACHE_DIR, sig[:2], sig, str(i)) + self.bucket_copy(orig, x) + except EnvironmentError: + return traceback.format_exc() + return OK + +def loop(service): + """ + This function is run when this file is run as a standalone python script, + it assumes a parent process that will communicate the commands to it + as pickled-encoded tuples (one line per command) + + The commands are to copy files to the cache or copy files from the + cache to a target destination + """ + # one operation is performed at a single time by a single process + # therefore stdin never has more than one line + txt = sys.stdin.readline().strip() + if not txt: + # parent process probably ended + sys.exit(1) + ret = OK + + [sig, files_from, files_to] = cPickle.loads(base64.b64decode(txt)) + if files_from: + # TODO return early when pushing files upstream + ret = service.copy_to_cache(sig, files_from, files_to) + elif files_to: + # the build process waits for workers to (possibly) obtain files from the cache + ret = service.copy_from_cache(sig, files_from, files_to) + else: + ret = "Invalid command" + + obj = base64.b64encode(cPickle.dumps(ret)) + sys.stdout.write(obj.decode()) + sys.stdout.write('\n') + sys.stdout.flush() + +if __name__ == '__main__': + if CACHE_DIR.startswith('s3://') or CACHE_DIR.startswith('gs://') or CACHE_DIR.startswith('minio://'): + if CACHE_DIR.startswith('minio://'): + CACHE_DIR = CACHE_DIR[8:] # minio doesn't need the protocol part, uses config aliases + service = bucket_cache() + elif CACHE_DIR.startswith('http'): + service = netcache() + else: + service = fcache() + while 1: + try: + loop(service) + except KeyboardInterrupt: + break + diff --git a/third_party/waf/waflib/extras/why.py b/third_party/waf/waflib/extras/why.py new file mode 100644 index 0000000..1bb941f --- /dev/null +++ b/third_party/waf/waflib/extras/why.py @@ -0,0 +1,78 @@ +#! 
/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010 (ita) + +""" +This tool modifies the task signature scheme to store and obtain +information about the task execution (why it must run, etc):: + + def configure(conf): + conf.load('why') + +After adding the tool, a full rebuild is necessary: +waf clean build --zones=task +""" + +from waflib import Task, Utils, Logs, Errors + +def signature(self): + # compute the result one time, and suppose the scan_signature will give the good result + try: + return self.cache_sig + except AttributeError: + pass + + self.m = Utils.md5() + self.m.update(self.hcode) + id_sig = self.m.digest() + + # explicit deps + self.m = Utils.md5() + self.sig_explicit_deps() + exp_sig = self.m.digest() + + # env vars + self.m = Utils.md5() + self.sig_vars() + var_sig = self.m.digest() + + # implicit deps / scanner results + self.m = Utils.md5() + if self.scan: + try: + self.sig_implicit_deps() + except Errors.TaskRescan: + return self.signature() + impl_sig = self.m.digest() + + ret = self.cache_sig = impl_sig + id_sig + exp_sig + var_sig + return ret + + +Task.Task.signature = signature + +old = Task.Task.runnable_status +def runnable_status(self): + ret = old(self) + if ret == Task.RUN_ME: + try: + old_sigs = self.generator.bld.task_sigs[self.uid()] + except (KeyError, AttributeError): + Logs.debug("task: task must run as no previous signature exists") + else: + new_sigs = self.cache_sig + def v(x): + return Utils.to_hex(x) + + Logs.debug('Task %r', self) + msgs = ['* Implicit or scanner dependency', '* Task code', '* Source file, explicit or manual dependency', '* Configuration data variable'] + tmp = 'task: -> %s: %s %s' + for x in range(len(msgs)): + l = len(Utils.SIG_NIL) + a = new_sigs[x*l : (x+1)*l] + b = old_sigs[x*l : (x+1)*l] + if (a != b): + Logs.debug(tmp, msgs[x].ljust(35), v(a), v(b)) + return ret +Task.Task.runnable_status = runnable_status + diff --git a/third_party/waf/waflib/extras/win32_opts.py b/third_party/waf/waflib/extras/win32_opts.py new file mode 100644 index 0000000..9f7443c --- /dev/null +++ b/third_party/waf/waflib/extras/win32_opts.py @@ -0,0 +1,170 @@ +#! /usr/bin/env python +# encoding: utf-8 + +""" +Windows-specific optimizations + +This module can help reducing the overhead of listing files on windows +(more than 10000 files). Python 3.5 already provides the listdir +optimization though. 
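+
+A minimal usage sketch (an assumption, not spelled out in the module: loading
+it from the wscript is enough, since the patching below happens at import time
+and only when Utils.is_win32 is true)::
+
+	def configure(conf):
+		conf.load('win32_opts')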
+""" + +import os +from waflib import Utils, Build, Node, Logs + +try: + TP = '%s\\*'.decode('ascii') +except AttributeError: + TP = '%s\\*' + +if Utils.is_win32: + from waflib.Tools import md5_tstamp + import ctypes, ctypes.wintypes + + FindFirstFile = ctypes.windll.kernel32.FindFirstFileW + FindNextFile = ctypes.windll.kernel32.FindNextFileW + FindClose = ctypes.windll.kernel32.FindClose + FILE_ATTRIBUTE_DIRECTORY = 0x10 + INVALID_HANDLE_VALUE = -1 + UPPER_FOLDERS = ('.', '..') + try: + UPPER_FOLDERS = [unicode(x) for x in UPPER_FOLDERS] + except NameError: + pass + + def cached_hash_file(self): + try: + cache = self.ctx.cache_listdir_cache_hash_file + except AttributeError: + cache = self.ctx.cache_listdir_cache_hash_file = {} + + if id(self.parent) in cache: + try: + t = cache[id(self.parent)][self.name] + except KeyError: + raise IOError('Not a file') + else: + # an opportunity to list the files and the timestamps at once + findData = ctypes.wintypes.WIN32_FIND_DATAW() + find = FindFirstFile(TP % self.parent.abspath(), ctypes.byref(findData)) + + if find == INVALID_HANDLE_VALUE: + cache[id(self.parent)] = {} + raise IOError('Not a file') + + cache[id(self.parent)] = lst_files = {} + try: + while True: + if findData.cFileName not in UPPER_FOLDERS: + thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY + if not thatsadir: + ts = findData.ftLastWriteTime + d = (ts.dwLowDateTime << 32) | ts.dwHighDateTime + lst_files[str(findData.cFileName)] = d + if not FindNextFile(find, ctypes.byref(findData)): + break + except Exception: + cache[id(self.parent)] = {} + raise IOError('Not a file') + finally: + FindClose(find) + t = lst_files[self.name] + + fname = self.abspath() + if fname in Build.hashes_md5_tstamp: + if Build.hashes_md5_tstamp[fname][0] == t: + return Build.hashes_md5_tstamp[fname][1] + + try: + fd = os.open(fname, os.O_BINARY | os.O_RDONLY | os.O_NOINHERIT) + except OSError: + raise IOError('Cannot read from %r' % fname) + f = os.fdopen(fd, 'rb') + m = Utils.md5() + rb = 1 + try: + while rb: + rb = f.read(200000) + m.update(rb) + finally: + f.close() + + # ensure that the cache is overwritten + Build.hashes_md5_tstamp[fname] = (t, m.digest()) + return m.digest() + Node.Node.cached_hash_file = cached_hash_file + + def get_bld_sig_win32(self): + try: + return self.ctx.hash_cache[id(self)] + except KeyError: + pass + except AttributeError: + self.ctx.hash_cache = {} + self.ctx.hash_cache[id(self)] = ret = Utils.h_file(self.abspath()) + return ret + Node.Node.get_bld_sig = get_bld_sig_win32 + + def isfile_cached(self): + # optimize for nt.stat calls, assuming there are many files for few folders + try: + cache = self.__class__.cache_isfile_cache + except AttributeError: + cache = self.__class__.cache_isfile_cache = {} + + try: + c1 = cache[id(self.parent)] + except KeyError: + c1 = cache[id(self.parent)] = [] + + curpath = self.parent.abspath() + findData = ctypes.wintypes.WIN32_FIND_DATAW() + find = FindFirstFile(TP % curpath, ctypes.byref(findData)) + + if find == INVALID_HANDLE_VALUE: + Logs.error("invalid win32 handle isfile_cached %r", self.abspath()) + return os.path.isfile(self.abspath()) + + try: + while True: + if findData.cFileName not in UPPER_FOLDERS: + thatsadir = findData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY + if not thatsadir: + c1.append(str(findData.cFileName)) + if not FindNextFile(find, ctypes.byref(findData)): + break + except Exception as e: + Logs.error('exception while listing a folder %r %r', self.abspath(), e) + return 
os.path.isfile(self.abspath()) + finally: + FindClose(find) + return self.name in c1 + Node.Node.isfile_cached = isfile_cached + + def find_or_declare_win32(self, lst): + # assuming that "find_or_declare" is called before the build starts, remove the calls to os.path.isfile + if isinstance(lst, str): + lst = [x for x in Utils.split_path(lst) if x and x != '.'] + + node = self.get_bld().search_node(lst) + if node: + if not node.isfile_cached(): + try: + node.parent.mkdir() + except OSError: + pass + return node + self = self.get_src() + node = self.find_node(lst) + if node: + if not node.isfile_cached(): + try: + node.parent.mkdir() + except OSError: + pass + return node + node = self.get_bld().make_node(lst) + node.parent.mkdir() + return node + Node.Node.find_or_declare = find_or_declare_win32 + diff --git a/third_party/waf/waflib/extras/wix.py b/third_party/waf/waflib/extras/wix.py new file mode 100644 index 0000000..d87bfbb --- /dev/null +++ b/third_party/waf/waflib/extras/wix.py @@ -0,0 +1,87 @@ +#!/usr/bin/python +# encoding: utf-8 +# vim: tabstop=4 noexpandtab + +""" +Windows Installer XML Tool (WiX) + +.wxs --- candle ---> .wxobj --- light ---> .msi + +bld(features='wix', some.wxs, gen='some.msi', candleflags=[..], lightflags=[..]) + +bld(features='wix', source=['bundle.wxs','WixBalExtension'], gen='setup.exe', candleflags=[..]) +""" + +import os, copy +from waflib import TaskGen +from waflib import Task +from waflib.Utils import winreg + +class candle(Task.Task): + run_str = '${CANDLE} -nologo ${CANDLEFLAGS} -out ${TGT} ${SRC[0].abspath()}', + +class light(Task.Task): + run_str = "${LIGHT} -nologo -b ${SRC[0].parent.abspath()} ${LIGHTFLAGS} -out ${TGT} ${SRC[0].abspath()}" + +@TaskGen.feature('wix') +@TaskGen.before_method('process_source') +def wix(self): + #X.wxs -> ${SRC} for CANDLE + #X.wxobj -> ${SRC} for LIGHT + #X.dll -> -ext X in ${LIGHTFLAGS} + #X.wxl -> wixui.wixlib -loc X.wxl in ${LIGHTFLAGS} + wxobj = [] + wxs = [] + exts = [] + wxl = [] + rest = [] + for x in self.source: + if x.endswith('.wxobj'): + wxobj.append(x) + elif x.endswith('.wxs'): + wxobj.append(self.path.find_or_declare(x[:-4]+'.wxobj')) + wxs.append(x) + elif x.endswith('.dll'): + exts.append(x[:-4]) + elif '.' 
not in x: + exts.append(x) + elif x.endswith('.wxl'): + wxl.append(x) + else: + rest.append(x) + self.source = self.to_nodes(rest) #.wxs + + cndl = self.create_task('candle', self.to_nodes(wxs), self.to_nodes(wxobj)) + lght = self.create_task('light', self.to_nodes(wxobj), self.path.find_or_declare(self.gen)) + + cndl.env.CANDLEFLAGS = copy.copy(getattr(self,'candleflags',[])) + lght.env.LIGHTFLAGS = copy.copy(getattr(self,'lightflags',[])) + + for x in wxl: + lght.env.append_value('LIGHTFLAGS','wixui.wixlib') + lght.env.append_value('LIGHTFLAGS','-loc') + lght.env.append_value('LIGHTFLAGS',x) + for x in exts: + cndl.env.append_value('CANDLEFLAGS','-ext') + cndl.env.append_value('CANDLEFLAGS',x) + lght.env.append_value('LIGHTFLAGS','-ext') + lght.env.append_value('LIGHTFLAGS',x) + +#wix_bin_path() +def wix_bin_path(): + basekey = r"SOFTWARE\Microsoft\.NETFramework\AssemblyFolders" + query = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, basekey) + cnt=winreg.QueryInfoKey(query)[0] + thiskey = r'C:\Program Files (x86)\WiX Toolset v3.10\SDK' + for i in range(cnt-1,-1,-1): + thiskey = winreg.EnumKey(query,i) + if 'WiX' in thiskey: + break + winreg.CloseKey(query) + return os.path.normpath(winreg.QueryValue(winreg.HKEY_LOCAL_MACHINE, basekey+r'\\'+thiskey)+'..\\bin') + +def configure(ctx): + path_list=[wix_bin_path()] + ctx.find_program('candle', var='CANDLE', mandatory=True, path_list = path_list) + ctx.find_program('light', var='LIGHT', mandatory=True, path_list = path_list) + diff --git a/third_party/waf/waflib/extras/xcode6.py b/third_party/waf/waflib/extras/xcode6.py new file mode 100644 index 0000000..c5b3091 --- /dev/null +++ b/third_party/waf/waflib/extras/xcode6.py @@ -0,0 +1,727 @@ +#! /usr/bin/env python +# encoding: utf-8 +# XCode 3/XCode 4/XCode 6/Xcode 7 generator for Waf +# Based on work by Nicolas Mercier 2011 +# Extended by Simon Warg 2015, https://github.com/mimon +# XCode project file format based on http://www.monobjc.net/xcode-project-file-format.html + +""" +See playground/xcode6/ for usage examples. 
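+
+A minimal wscript sketch (an illustration inferred from the attributes this
+module reads; 'target_type' must be one of the TARGET_TYPES keys defined
+below, e.g. 'app', 'framework', 'dylib', 'stlib' or 'exe')::
+
+	def options(opt):
+		opt.load('compiler_cxx')
+	def configure(conf):
+		conf.load('compiler_cxx xcode6')
+	def build(bld):
+		bld(features='cxx cxxprogram', source='main.cpp', target='demo', target_type='exe')
+
+The project file is then generated by running 'waf configure xcode6'.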
+ +""" + +from waflib import Context, TaskGen, Build, Utils, Errors, Logs +import os, sys + +# FIXME too few extensions +XCODE_EXTS = ['.c', '.cpp', '.m', '.mm'] + +HEADERS_GLOB = '**/(*.h|*.hpp|*.H|*.inl)' + +MAP_EXT = { + '': "folder", + '.h' : "sourcecode.c.h", + + '.hh': "sourcecode.cpp.h", + '.inl': "sourcecode.cpp.h", + '.hpp': "sourcecode.cpp.h", + + '.c': "sourcecode.c.c", + + '.m': "sourcecode.c.objc", + + '.mm': "sourcecode.cpp.objcpp", + + '.cc': "sourcecode.cpp.cpp", + + '.cpp': "sourcecode.cpp.cpp", + '.C': "sourcecode.cpp.cpp", + '.cxx': "sourcecode.cpp.cpp", + '.c++': "sourcecode.cpp.cpp", + + '.l': "sourcecode.lex", # luthor + '.ll': "sourcecode.lex", + + '.y': "sourcecode.yacc", + '.yy': "sourcecode.yacc", + + '.plist': "text.plist.xml", + ".nib": "wrapper.nib", + ".xib": "text.xib", +} + +# Used in PBXNativeTarget elements +PRODUCT_TYPE_APPLICATION = 'com.apple.product-type.application' +PRODUCT_TYPE_FRAMEWORK = 'com.apple.product-type.framework' +PRODUCT_TYPE_EXECUTABLE = 'com.apple.product-type.tool' +PRODUCT_TYPE_LIB_STATIC = 'com.apple.product-type.library.static' +PRODUCT_TYPE_LIB_DYNAMIC = 'com.apple.product-type.library.dynamic' +PRODUCT_TYPE_EXTENSION = 'com.apple.product-type.kernel-extension' +PRODUCT_TYPE_IOKIT = 'com.apple.product-type.kernel-extension.iokit' + +# Used in PBXFileReference elements +FILE_TYPE_APPLICATION = 'wrapper.cfbundle' +FILE_TYPE_FRAMEWORK = 'wrapper.framework' +FILE_TYPE_LIB_DYNAMIC = 'compiled.mach-o.dylib' +FILE_TYPE_LIB_STATIC = 'archive.ar' +FILE_TYPE_EXECUTABLE = 'compiled.mach-o.executable' + +# Tuple packs of the above +TARGET_TYPE_FRAMEWORK = (PRODUCT_TYPE_FRAMEWORK, FILE_TYPE_FRAMEWORK, '.framework') +TARGET_TYPE_APPLICATION = (PRODUCT_TYPE_APPLICATION, FILE_TYPE_APPLICATION, '.app') +TARGET_TYPE_DYNAMIC_LIB = (PRODUCT_TYPE_LIB_DYNAMIC, FILE_TYPE_LIB_DYNAMIC, '.dylib') +TARGET_TYPE_STATIC_LIB = (PRODUCT_TYPE_LIB_STATIC, FILE_TYPE_LIB_STATIC, '.a') +TARGET_TYPE_EXECUTABLE = (PRODUCT_TYPE_EXECUTABLE, FILE_TYPE_EXECUTABLE, '') + +# Maps target type string to its data +TARGET_TYPES = { + 'framework': TARGET_TYPE_FRAMEWORK, + 'app': TARGET_TYPE_APPLICATION, + 'dylib': TARGET_TYPE_DYNAMIC_LIB, + 'stlib': TARGET_TYPE_STATIC_LIB, + 'exe' :TARGET_TYPE_EXECUTABLE, +} + +def delete_invalid_values(dct): + """ Deletes entries that are dictionaries or sets """ + for k, v in list(dct.items()): + if isinstance(v, dict) or isinstance(v, set): + del dct[k] + return dct + +""" +Configuration of the global project settings. Sets an environment variable 'PROJ_CONFIGURATION' +which is a dictionary of configuration name and buildsettings pair. +E.g.: +env.PROJ_CONFIGURATION = { + 'Debug': { + 'ARCHS': 'x86', + ... + } + 'Release': { + 'ARCHS': x86_64' + ... + } +} +The user can define a completely customized dictionary in configure() stage. Otherwise a default Debug/Release will be created +based on env variable +""" +def configure(self): + if not self.env.PROJ_CONFIGURATION: + self.to_log("A default project configuration was created since no custom one was given in the configure(conf) stage. Define your custom project settings by adding PROJ_CONFIGURATION to env. The env.PROJ_CONFIGURATION must be a dictionary with at least one key, where each key is the configuration name, and the value is a dictionary of key/value settings.\n") + + # Check for any added config files added by the tool 'c_config'. 
+ if 'cfg_files' in self.env: + self.env.INCLUDES = Utils.to_list(self.env.INCLUDES) + [os.path.abspath(os.path.dirname(f)) for f in self.env.cfg_files] + + # Create default project configuration? + if 'PROJ_CONFIGURATION' not in self.env: + defaults = delete_invalid_values(self.env.get_merged_dict()) + self.env.PROJ_CONFIGURATION = { + "Debug": defaults, + "Release": defaults, + } + + # Some build settings are required to be present by XCode. We will supply default values + # if user hasn't defined any. + defaults_required = [('PRODUCT_NAME', '$(TARGET_NAME)')] + for cfgname,settings in self.env.PROJ_CONFIGURATION.items(): + for default_var, default_val in defaults_required: + if default_var not in settings: + settings[default_var] = default_val + + # Error check customization + if not isinstance(self.env.PROJ_CONFIGURATION, dict): + raise Errors.ConfigurationError("The env.PROJ_CONFIGURATION must be a dictionary with at least one key, where each key is the configuration name, and the value is a dictionary of key/value settings.") + +part1 = 0 +part2 = 10000 +part3 = 0 +id = 562000999 +def newid(): + global id + id += 1 + return "%04X%04X%04X%012d" % (0, 10000, 0, id) + +""" +Represents a tree node in the XCode project plist file format. +When written to a file, all attributes of XCodeNode are stringified together with +its value. However, attributes starting with an underscore _ are ignored +during that process and allows you to store arbitrary values that are not supposed +to be written out. +""" +class XCodeNode(object): + def __init__(self): + self._id = newid() + self._been_written = False + + def tostring(self, value): + if isinstance(value, dict): + result = "{\n" + for k,v in value.items(): + result = result + "\t\t\t%s = %s;\n" % (k, self.tostring(v)) + result = result + "\t\t}" + return result + elif isinstance(value, str): + return '"%s"' % value.replace('"', '\\\\\\"') + elif isinstance(value, list): + result = "(\n" + for i in value: + result = result + "\t\t\t\t%s,\n" % self.tostring(i) + result = result + "\t\t\t)" + return result + elif isinstance(value, XCodeNode): + return value._id + else: + return str(value) + + def write_recursive(self, value, file): + if isinstance(value, dict): + for k,v in value.items(): + self.write_recursive(v, file) + elif isinstance(value, list): + for i in value: + self.write_recursive(i, file) + elif isinstance(value, XCodeNode): + value.write(file) + + def write(self, file): + if not self._been_written: + self._been_written = True + for attribute,value in self.__dict__.items(): + if attribute[0] != '_': + self.write_recursive(value, file) + w = file.write + w("\t%s = {\n" % self._id) + w("\t\tisa = %s;\n" % self.__class__.__name__) + for attribute,value in self.__dict__.items(): + if attribute[0] != '_': + w("\t\t%s = %s;\n" % (attribute, self.tostring(value))) + w("\t};\n\n") + +# Configurations +class XCBuildConfiguration(XCodeNode): + def __init__(self, name, settings = {}, env=None): + XCodeNode.__init__(self) + self.baseConfigurationReference = "" + self.buildSettings = settings + self.name = name + if env and env.ARCH: + settings['ARCHS'] = " ".join(env.ARCH) + + +class XCConfigurationList(XCodeNode): + def __init__(self, configlst): + """ :param configlst: list of XCConfigurationList """ + XCodeNode.__init__(self) + self.buildConfigurations = configlst + self.defaultConfigurationIsVisible = 0 + self.defaultConfigurationName = configlst and configlst[0].name or "" + +# Group/Files +class PBXFileReference(XCodeNode): + def 
__init__(self, name, path, filetype = '', sourcetree = "SOURCE_ROOT"): + + XCodeNode.__init__(self) + self.fileEncoding = 4 + if not filetype: + _, ext = os.path.splitext(name) + filetype = MAP_EXT.get(ext, 'text') + self.lastKnownFileType = filetype + self.explicitFileType = filetype + self.name = name + self.path = path + self.sourceTree = sourcetree + + def __hash__(self): + return (self.path+self.name).__hash__() + + def __eq__(self, other): + return (self.path, self.name) == (other.path, other.name) + +class PBXBuildFile(XCodeNode): + """ This element indicate a file reference that is used in a PBXBuildPhase (either as an include or resource). """ + def __init__(self, fileRef, settings={}): + XCodeNode.__init__(self) + + # fileRef is a reference to a PBXFileReference object + self.fileRef = fileRef + + # A map of key/value pairs for additional settings. + self.settings = settings + + def __hash__(self): + return (self.fileRef).__hash__() + + def __eq__(self, other): + return self.fileRef == other.fileRef + +class PBXGroup(XCodeNode): + def __init__(self, name, sourcetree = 'SOURCE_TREE'): + XCodeNode.__init__(self) + self.children = [] + self.name = name + self.sourceTree = sourcetree + + # Maintain a lookup table for all PBXFileReferences + # that are contained in this group. + self._filerefs = {} + + def add(self, sources): + """ + Add a list of PBXFileReferences to this group + + :param sources: list of PBXFileReferences objects + """ + self._filerefs.update(dict(zip(sources, sources))) + self.children.extend(sources) + + def get_sub_groups(self): + """ + Returns all child PBXGroup objects contained in this group + """ + return list(filter(lambda x: isinstance(x, PBXGroup), self.children)) + + def find_fileref(self, fileref): + """ + Recursively search this group for an existing PBXFileReference. Returns None + if none were found. + + The reason you'd want to reuse existing PBXFileReferences from a PBXGroup is that XCode doesn't like PBXFileReferences that aren't part of a PBXGroup hierarchy. + If it isn't, the consequence is that certain UI features like 'Reveal in Finder' + stops working. + """ + if fileref in self._filerefs: + return self._filerefs[fileref] + elif self.children: + for childgroup in self.get_sub_groups(): + f = childgroup.find_fileref(fileref) + if f: + return f + return None + +class PBXContainerItemProxy(XCodeNode): + """ This is the element for to decorate a target item. """ + def __init__(self, containerPortal, remoteGlobalIDString, remoteInfo='', proxyType=1): + XCodeNode.__init__(self) + self.containerPortal = containerPortal # PBXProject + self.remoteGlobalIDString = remoteGlobalIDString # PBXNativeTarget + self.remoteInfo = remoteInfo # Target name + self.proxyType = proxyType + +class PBXTargetDependency(XCodeNode): + """ This is the element for referencing other target through content proxies. """ + def __init__(self, native_target, proxy): + XCodeNode.__init__(self) + self.target = native_target + self.targetProxy = proxy + +class PBXFrameworksBuildPhase(XCodeNode): + """ This is the element for the framework link build phase, i.e. 
linking to frameworks """ + def __init__(self, pbxbuildfiles): + XCodeNode.__init__(self) + self.buildActionMask = 2147483647 + self.runOnlyForDeploymentPostprocessing = 0 + self.files = pbxbuildfiles #List of PBXBuildFile (.o, .framework, .dylib) + +class PBXHeadersBuildPhase(XCodeNode): + """ This is the element for adding header files to be packaged into the .framework """ + def __init__(self, pbxbuildfiles): + XCodeNode.__init__(self) + self.buildActionMask = 2147483647 + self.runOnlyForDeploymentPostprocessing = 0 + self.files = pbxbuildfiles #List of PBXBuildFile (.o, .framework, .dylib) + +class PBXCopyFilesBuildPhase(XCodeNode): + """ + Represents the PBXCopyFilesBuildPhase section. PBXBuildFile + can be added to this node to copy files after build is done. + """ + def __init__(self, pbxbuildfiles, dstpath, dstSubpathSpec=0, *args, **kwargs): + XCodeNode.__init__(self) + self.files = pbxbuildfiles + self.dstPath = dstpath + self.dstSubfolderSpec = dstSubpathSpec + +class PBXSourcesBuildPhase(XCodeNode): + """ Represents the 'Compile Sources' build phase in a Xcode target """ + def __init__(self, buildfiles): + XCodeNode.__init__(self) + self.files = buildfiles # List of PBXBuildFile objects + +class PBXLegacyTarget(XCodeNode): + def __init__(self, action, target=''): + XCodeNode.__init__(self) + self.buildConfigurationList = XCConfigurationList([XCBuildConfiguration('waf', {})]) + if not target: + self.buildArgumentsString = "%s %s" % (sys.argv[0], action) + else: + self.buildArgumentsString = "%s %s --targets=%s" % (sys.argv[0], action, target) + self.buildPhases = [] + self.buildToolPath = sys.executable + self.buildWorkingDirectory = "" + self.dependencies = [] + self.name = target or action + self.productName = target or action + self.passBuildSettingsInEnvironment = 0 + +class PBXShellScriptBuildPhase(XCodeNode): + def __init__(self, action, target): + XCodeNode.__init__(self) + self.buildActionMask = 2147483647 + self.files = [] + self.inputPaths = [] + self.outputPaths = [] + self.runOnlyForDeploymentPostProcessing = 0 + self.shellPath = "/bin/sh" + self.shellScript = "%s %s %s --targets=%s" % (sys.executable, sys.argv[0], action, target) + +class PBXNativeTarget(XCodeNode): + """ Represents a target in XCode, e.g. App, DyLib, Framework etc. """ + def __init__(self, target, node, target_type=TARGET_TYPE_APPLICATION, configlist=[], buildphases=[]): + XCodeNode.__init__(self) + product_type = target_type[0] + file_type = target_type[1] + + self.buildConfigurationList = XCConfigurationList(configlist) + self.buildPhases = buildphases + self.buildRules = [] + self.dependencies = [] + self.name = target + self.productName = target + self.productType = product_type # See TARGET_TYPE_ tuples constants + self.productReference = PBXFileReference(node.name, node.abspath(), file_type, '') + + def add_configuration(self, cf): + """ :type cf: XCBuildConfiguration """ + self.buildConfigurationList.buildConfigurations.append(cf) + + def add_build_phase(self, phase): + # Some build phase types may appear only once. If a phase type already exists, then merge them. 
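+		# (Xcode shows a single 'Compile Sources' and a single
+		# 'Link Binary With Libraries' phase per target, so the file lists of
+		# PBXSourcesBuildPhase and PBXFrameworksBuildPhase are merged instead of duplicated)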
+ if ( (phase.__class__ == PBXFrameworksBuildPhase) + or (phase.__class__ == PBXSourcesBuildPhase) ): + for b in self.buildPhases: + if b.__class__ == phase.__class__: + b.files.extend(phase.files) + return + self.buildPhases.append(phase) + + def add_dependency(self, depnd): + self.dependencies.append(depnd) + +# Root project object +class PBXProject(XCodeNode): + def __init__(self, name, version, env): + XCodeNode.__init__(self) + + if not isinstance(env.PROJ_CONFIGURATION, dict): + raise Errors.WafError("Error: env.PROJ_CONFIGURATION must be a dictionary. This is done for you if you do not define one yourself. However, did you load the xcode module at the end of your wscript configure() ?") + + # Retrieve project configuration + configurations = [] + for config_name, settings in env.PROJ_CONFIGURATION.items(): + cf = XCBuildConfiguration(config_name, settings) + configurations.append(cf) + + self.buildConfigurationList = XCConfigurationList(configurations) + self.compatibilityVersion = version[0] + self.hasScannedForEncodings = 1 + self.mainGroup = PBXGroup(name) + self.projectRoot = "" + self.projectDirPath = "" + self.targets = [] + self._objectVersion = version[1] + + def create_target_dependency(self, target, name): + """ : param target : PXBNativeTarget """ + proxy = PBXContainerItemProxy(self, target, name) + dependency = PBXTargetDependency(target, proxy) + return dependency + + def write(self, file): + + # Make sure this is written only once + if self._been_written: + return + + w = file.write + w("// !$*UTF8*$!\n") + w("{\n") + w("\tarchiveVersion = 1;\n") + w("\tclasses = {\n") + w("\t};\n") + w("\tobjectVersion = %d;\n" % self._objectVersion) + w("\tobjects = {\n\n") + + XCodeNode.write(self, file) + + w("\t};\n") + w("\trootObject = %s;\n" % self._id) + w("}\n") + + def add_target(self, target): + self.targets.append(target) + + def get_target(self, name): + """ Get a reference to PBXNativeTarget if it exists """ + for t in self.targets: + if t.name == name: + return t + return None + +@TaskGen.feature('c', 'cxx') +@TaskGen.after('propagate_uselib_vars', 'apply_incpaths') +def process_xcode(self): + bld = self.bld + try: + p = bld.project + except AttributeError: + return + + if not hasattr(self, 'target_type'): + return + + products_group = bld.products_group + + target_group = PBXGroup(self.name) + p.mainGroup.children.append(target_group) + + # Determine what type to build - framework, app bundle etc. + target_type = getattr(self, 'target_type', 'app') + if target_type not in TARGET_TYPES: + raise Errors.WafError("Target type '%s' does not exists. Available options are '%s'. In target '%s'" % (target_type, "', '".join(TARGET_TYPES.keys()), self.name)) + else: + target_type = TARGET_TYPES[target_type] + file_ext = target_type[2] + + # Create the output node + target_node = self.path.find_or_declare(self.name+file_ext) + target = PBXNativeTarget(self.name, target_node, target_type, [], []) + + products_group.children.append(target.productReference) + + # Pull source files from the 'source' attribute and assign them to a UI group. + # Use a default UI group named 'Source' unless the user + # provides a 'group_files' dictionary to customize the UI grouping. 
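+	# A hypothetical example of the expected shape:
+	#   bld(features='cxx cxxprogram', ..., group_files={'Core': 'core/a.cpp core/b.cpp', 'UI': 'ui/win.cpp'})
+	# where each value is accepted by bld.create_group() below.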
+ sources = getattr(self, 'source', []) + if hasattr(self, 'group_files'): + group_files = getattr(self, 'group_files', {}) + for grpname,files in group_files.items(): + group = bld.create_group(grpname, files) + target_group.children.append(group) + else: + group = bld.create_group('Source', sources) + target_group.children.append(group) + + # Create a PBXFileReference for each source file. + # If the source file already exists as a PBXFileReference in any of the UI groups, then + # reuse that PBXFileReference object (Xcode does not like duplicate file references) + for idx, path in enumerate(sources): + fileref = PBXFileReference(path.name, path.abspath()) + existing_fileref = target_group.find_fileref(fileref) + if existing_fileref: + sources[idx] = existing_fileref + else: + sources[idx] = fileref + + # Drop any source files whose extensions Xcode can't work with. + # The allowed file extensions are defined in XCODE_EXTS. + is_valid_file_extension = lambda file: os.path.splitext(file.path)[1] in XCODE_EXTS + sources = list(filter(is_valid_file_extension, sources)) + + buildfiles = [bld.unique_buildfile(PBXBuildFile(x)) for x in sources] + target.add_build_phase(PBXSourcesBuildPhase(buildfiles)) + + # Check if any framework to link against is some other target we've made + libs = getattr(self, 'tmp_use_seen', []) + for lib in libs: + use_target = p.get_target(lib) + if use_target: + # Create an Xcode dependency so that Xcode knows to build the other target before this one + dependency = p.create_target_dependency(use_target, use_target.name) + target.add_dependency(dependency) + + buildphase = PBXFrameworksBuildPhase([PBXBuildFile(use_target.productReference)]) + target.add_build_phase(buildphase) + if lib in self.env.LIB: + self.env.LIB = list(filter(lambda x: x != lib, self.env.LIB)) + + # If 'export_headers' is present, add files to the Headers build phase in Xcode. + # These are the files that get packaged into a Framework, for instance. 
+ exp_hdrs = getattr(self, 'export_headers', []) + hdrs = bld.as_nodes(Utils.to_list(exp_hdrs)) + files = [p.mainGroup.find_fileref(PBXFileReference(n.name, n.abspath())) for n in hdrs] + files = [PBXBuildFile(f, {'ATTRIBUTES': ('Public',)}) for f in files] + buildphase = PBXHeadersBuildPhase(files) + target.add_build_phase(buildphase) + + # Merge frameworks and libs into one list, and prefix the frameworks + frameworks = Utils.to_list(self.env.FRAMEWORK) + frameworks = ' '.join(['-framework %s' % (f.split('.framework')[0]) for f in frameworks]) + + libs = Utils.to_list(self.env.STLIB) + Utils.to_list(self.env.LIB) + libs = ' '.join(bld.env['STLIB_ST'] % t for t in libs) + + # Override target-specific build settings + bldsettings = { + 'HEADER_SEARCH_PATHS': ['$(inherited)'] + self.env['INCPATHS'], + 'LIBRARY_SEARCH_PATHS': ['$(inherited)'] + Utils.to_list(self.env.LIBPATH) + Utils.to_list(self.env.STLIBPATH) + Utils.to_list(self.env.LIBDIR), + 'FRAMEWORK_SEARCH_PATHS': ['$(inherited)'] + Utils.to_list(self.env.FRAMEWORKPATH), + 'OTHER_LDFLAGS': libs + ' ' + frameworks + ' ' + ' '.join(bld.env['LINKFLAGS']), + 'OTHER_CPLUSPLUSFLAGS': Utils.to_list(self.env['CXXFLAGS']), + 'OTHER_CFLAGS': Utils.to_list(self.env['CFLAGS']), + 'INSTALL_PATH': [], + 'GCC_PREPROCESSOR_DEFINITIONS': self.env['DEFINES'] + } + + # Install paths + installpaths = Utils.to_list(getattr(self, 'install', [])) + prodbuildfile = PBXBuildFile(target.productReference) + for instpath in installpaths: + bldsettings['INSTALL_PATH'].append(instpath) + target.add_build_phase(PBXCopyFilesBuildPhase([prodbuildfile], instpath)) + + if not bldsettings['INSTALL_PATH']: + del bldsettings['INSTALL_PATH'] + + # Create build settings which can override the project settings. Defaults to an empty + # dict if the user did not pass one. This will be filled with target-specific + # search paths, libs to link, etc. + settings = getattr(self, 'settings', {}) + + # The keys represent different build configurations, e.g. Debug, Release and so on. + # Insert our generated build settings into all configuration names + keys = set(settings.keys()) | set(bld.env.PROJ_CONFIGURATION.keys()) + for k in keys: + if k in settings: + settings[k].update(bldsettings) + else: + settings[k] = bldsettings + + for k,v in settings.items(): + target.add_configuration(XCBuildConfiguration(k, v)) + + p.add_target(target) + + +class xcode(Build.BuildContext): + cmd = 'xcode6' + fun = 'build' + + def as_nodes(self, files): + """ Returns a list of waflib.Node objects from a list of file path strings """ + nodes = [] + for x in files: + if not isinstance(x, str): + d = x + else: + d = self.srcnode.find_node(x) + if not d: + raise Errors.WafError('File \'%s\' was not found' % x) + nodes.append(d) + return nodes + + def create_group(self, name, files): + """ + Returns a new PBXGroup containing the files (paths) passed in the files arg + :type files: string + """ + group = PBXGroup(name) + """ + Do not use a unique file reference here, since Xcode seems to allow only one file reference + to be referenced by a group. + """ + files_ = [] + for d in self.as_nodes(Utils.to_list(files)): + fileref = PBXFileReference(d.name, d.abspath()) + files_.append(fileref) + group.add(files_) + return group + + def unique_buildfile(self, buildfile): + """ + Returns a unique buildfile, possibly an existing one. + Use this after you've constructed a PBXBuildFile to make sure there is + only one PBXBuildFile for the same file in the same project. 
+ """ + try: + build_files = self.build_files + except AttributeError: + build_files = self.build_files = {} + + if buildfile not in build_files: + build_files[buildfile] = buildfile + return build_files[buildfile] + + def execute(self): + """ + Entry point + """ + self.restore() + if not self.all_envs: + self.load_envs() + self.recurse([self.run_dir]) + + appname = getattr(Context.g_module, Context.APPNAME, os.path.basename(self.srcnode.abspath())) + + p = PBXProject(appname, ('Xcode 3.2', 46), self.env) + + # If we don't create a Products group, then + # XCode will create one, which entails that + # we'll start to see duplicate files in the UI + # for some reason. + products_group = PBXGroup('Products') + p.mainGroup.children.append(products_group) + + self.project = p + self.products_group = products_group + + # post all task generators + # the process_xcode method above will be called for each target + if self.targets and self.targets != '*': + (self._min_grp, self._exact_tg) = self.get_targets() + + self.current_group = 0 + while self.current_group < len(self.groups): + self.post_group() + self.current_group += 1 + + node = self.bldnode.make_node('%s.xcodeproj' % appname) + node.mkdir() + node = node.make_node('project.pbxproj') + with open(node.abspath(), 'w') as f: + p.write(f) + Logs.pprint('GREEN', 'Wrote %r' % node.abspath()) + +def bind_fun(tgtype): + def fun(self, *k, **kw): + tgtype = fun.__name__ + if tgtype == 'shlib' or tgtype == 'dylib': + features = 'cxx cxxshlib' + tgtype = 'dylib' + elif tgtype == 'framework': + features = 'cxx cxxshlib' + tgtype = 'framework' + elif tgtype == 'program': + features = 'cxx cxxprogram' + tgtype = 'exe' + elif tgtype == 'app': + features = 'cxx cxxprogram' + tgtype = 'app' + elif tgtype == 'stlib': + features = 'cxx cxxstlib' + tgtype = 'stlib' + lst = kw['features'] = Utils.to_list(kw.get('features', [])) + for x in features.split(): + if not x in kw['features']: + lst.append(x) + + kw['target_type'] = tgtype + return self(*k, **kw) + fun.__name__ = tgtype + setattr(Build.BuildContext, tgtype, fun) + return fun + +for xx in 'app framework dylib shlib stlib program'.split(): + bind_fun(xx) + diff --git a/third_party/waf/waflib/fixpy2.py b/third_party/waf/waflib/fixpy2.py new file mode 100644 index 0000000..c99bff4 --- /dev/null +++ b/third_party/waf/waflib/fixpy2.py @@ -0,0 +1,64 @@ +#!/usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2010-2018 (ita) + +from __future__ import with_statement + +import os + +all_modifs = {} + +def fixdir(dir): + """Call all substitution functions on Waf folders""" + for k in all_modifs: + for v in all_modifs[k]: + modif(os.path.join(dir, 'waflib'), k, v) + +def modif(dir, name, fun): + """Call a substitution function""" + if name == '*': + lst = [] + for y in '. 
Tools extras'.split(): + for x in os.listdir(os.path.join(dir, y)): + if x.endswith('.py'): + lst.append(y + os.sep + x) + for x in lst: + modif(dir, x, fun) + return + + filename = os.path.join(dir, name) + with open(filename, 'r') as f: + txt = f.read() + + txt = fun(txt) + + with open(filename, 'w') as f: + f.write(txt) + +def subst(*k): + """Register a substitution function""" + def do_subst(fun): + for x in k: + try: + all_modifs[x].append(fun) + except KeyError: + all_modifs[x] = [fun] + return fun + return do_subst + +@subst('*') +def r1(code): + "utf-8 fixes for python < 2.6" + code = code.replace('as e:', ',e:') + code = code.replace(".decode(sys.stdout.encoding or'latin-1',errors='replace')", '') + return code.replace('.encode()', '') + +@subst('Runner.py') +def r4(code): + "generator syntax" + return code.replace('next(self.biter)', 'self.biter.next()').replace('self.daemon = True', 'self.setDaemon(1)') + +@subst('Context.py') +def r5(code): + return code.replace("('Execution failure: %s'%str(e),ex=e)", "('Execution failure: %s'%str(e),ex=e),None,sys.exc_info()[2]") + diff --git a/third_party/waf/waflib/processor.py b/third_party/waf/waflib/processor.py new file mode 100755 index 0000000..eff2e69 --- /dev/null +++ b/third_party/waf/waflib/processor.py @@ -0,0 +1,68 @@ +#! /usr/bin/env python +# encoding: utf-8 +# Thomas Nagy, 2016-2018 (ita) + +import os, sys, traceback, base64, signal +try: + import cPickle +except ImportError: + import pickle as cPickle + +try: + import subprocess32 as subprocess +except ImportError: + import subprocess + +try: + TimeoutExpired = subprocess.TimeoutExpired +except AttributeError: + class TimeoutExpired(Exception): + pass + +def run(): + txt = sys.stdin.readline().strip() + if not txt: + # the parent process has probably ended + sys.exit(1) + [cmd, kwargs, cargs] = cPickle.loads(base64.b64decode(txt)) + cargs = cargs or {} + + if 'close_fds' not in kwargs: + # workers have no fds + kwargs['close_fds'] = False + + ret = 1 + out, err, ex, trace = (None, None, None, None) + try: + proc = subprocess.Popen(cmd, **kwargs) + try: + out, err = proc.communicate(**cargs) + except TimeoutExpired: + if kwargs.get('start_new_session') and hasattr(os, 'killpg'): + os.killpg(proc.pid, signal.SIGKILL) + else: + proc.kill() + out, err = proc.communicate() + exc = TimeoutExpired(proc.args, timeout=cargs['timeout'], output=out) + exc.stderr = err + raise exc + ret = proc.returncode + except Exception as e: + exc_type, exc_value, tb = sys.exc_info() + exc_lines = traceback.format_exception(exc_type, exc_value, tb) + trace = str(cmd) + '\n' + ''.join(exc_lines) + ex = e.__class__.__name__ + + # it is just text, so we may not need to pickle() + tmp = [ret, out, err, ex, trace] + obj = base64.b64encode(cPickle.dumps(tmp)) + sys.stdout.write(obj.decode()) + sys.stdout.write('\n') + sys.stdout.flush() + +while 1: + try: + run() + except KeyboardInterrupt: + break + diff --git a/web/index.html b/web/index.html new file mode 100644 index 0000000..388ec2c --- /dev/null +++ b/web/index.html @@ -0,0 +1,51 @@ +<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2//EN"> +<HTML> +<HEAD> +<TITLE>talloc</TITLE> +</HEAD> +<BODY BGCOLOR="#ffffff" TEXT="#000000" VLINK="#292555" LINK="#292555" ALINK="#cc0033"> + +<h1>talloc</h1> + +talloc is a hierarchical, pool-based memory allocator with +destructors. 
It is the core memory allocator used in Samba, and has +made a huge difference in many aspects of Samba4 development.<p> + +To get started with talloc, I would recommend you read the <a +href="http://samba.org/ftp/unpacked/talloc/talloc_guide.txt">talloc guide</a>. + +<h2>Download</h2> +You can download the latest releases of talloc from the <a +href="http://samba.org/ftp/talloc">talloc directory</a> on the Samba public +source archive. + +<h2>Discussion and bug reports</h2> + +talloc does not currently have its own mailing list or bug tracking +system. For now, please use the <a +href="https://lists.samba.org/mailman/listinfo/samba-technical">samba-technical</a> +mailing list, and the <a href="http://bugzilla.samba.org/">Samba +bugzilla</a> bug tracking system. + +<h2>Development</h2> + +You can download the latest code either via git or rsync.<br> +<br> +To fetch via git, see the following guide:<br> +<a href="http://wiki.samba.org/index.php/Using_Git_for_Samba_Development">Using Git for Samba Development</a><br> +Once you have cloned the tree, switch to the master branch and cd into the lib/talloc directory.<br> +<br> +To fetch via rsync, use this command: + +<pre> + rsync -Pavz samba.org::ftp/unpacked/standalone_projects/lib/talloc . +</pre> + +<hr> +<tiny> +<a href="http://samba.org/~tridge/">Andrew Tridgell</a><br> +talloc AT tridgell.net +</tiny> + +</BODY> +</HTML> diff --git a/wscript b/wscript new file mode 100644 --- /dev/null +++ b/wscript @@ -0,0 +1,192 @@ +#!/usr/bin/env python + +APPNAME = 'talloc' +VERSION = '2.4.2' + +import os +import sys + +# find the buildtools directory +top = '.' +while not os.path.exists(top+'/buildtools') and len(top.split('/')) < 5: + top = top + '/..' +sys.path.insert(0, top + '/buildtools/wafsamba') + +out = 'bin' + +import wafsamba +from wafsamba import samba_dist, samba_utils +from waflib import Logs, Options, Context + +# setup what directories to put in a tarball +samba_dist.DIST_DIRS("""lib/talloc:. 
lib/replace:lib/replace +buildtools:buildtools third_party/waf:third_party/waf""") + + +def options(opt): + opt.BUILTIN_DEFAULT('replace') + opt.PRIVATE_EXTENSION_DEFAULT('talloc', noextension='talloc') + opt.RECURSE('lib/replace') + if opt.IN_LAUNCH_DIR(): + opt.add_option('--enable-talloc-compat1', + help=("Build talloc 1.x.x compat library [False]"), + action="store_true", dest='TALLOC_COMPAT1', default=False) + + +def configure(conf): + conf.RECURSE('lib/replace') + + conf.env.standalone_talloc = conf.IN_LAUNCH_DIR() + + conf.define('TALLOC_BUILD_VERSION_MAJOR', int(VERSION.split('.')[0])) + conf.define('TALLOC_BUILD_VERSION_MINOR', int(VERSION.split('.')[1])) + conf.define('TALLOC_BUILD_VERSION_RELEASE', int(VERSION.split('.')[2])) + + conf.env.TALLOC_COMPAT1 = False + if conf.env.standalone_talloc: + conf.env.TALLOC_COMPAT1 = Options.options.TALLOC_COMPAT1 + conf.env.PKGCONFIGDIR = '${LIBDIR}/pkgconfig' + conf.env.TALLOC_VERSION = VERSION + + conf.CHECK_XSLTPROC_MANPAGES() + + conf.CHECK_HEADERS('sys/auxv.h') + conf.CHECK_FUNCS('getauxval') + + conf.SAMBA_CONFIG_H() + + conf.SAMBA_CHECK_UNDEFINED_SYMBOL_FLAGS() + + conf.SAMBA_CHECK_PYTHON() + conf.SAMBA_CHECK_PYTHON_HEADERS() + + if not conf.env.standalone_talloc: + if conf.CHECK_BUNDLED_SYSTEM_PKG('talloc', minversion=VERSION, + implied_deps='replace'): + conf.define('USING_SYSTEM_TALLOC', 1) + + if conf.env.disable_python: + using_system_pytalloc_util = False + else: + using_system_pytalloc_util = True + name = 'pytalloc-util' + conf.all_envs['default']['PYTHON_SO_ABI_FLAG'] + if not conf.CHECK_BUNDLED_SYSTEM_PKG(name, minversion=VERSION, + implied_deps='talloc replace'): + using_system_pytalloc_util = False + + if using_system_pytalloc_util: + conf.define('USING_SYSTEM_PYTALLOC_UTIL', 1) + + +def build(bld): + bld.RECURSE('lib/replace') + + if bld.env.standalone_talloc: + private_library = False + + # should we also install the symlink to libtalloc1.so here? 
+ bld.SAMBA_LIBRARY('talloc-compat1-%s' % (VERSION), + 'compat/talloc_compat1.c', + public_deps='talloc', + soname='libtalloc.so.1', + pc_files=[], + public_headers=[], + enabled=bld.env.TALLOC_COMPAT1) + + testsuite_deps = 'talloc' + if bld.CONFIG_SET('HAVE_PTHREAD'): + testsuite_deps += ' pthread' + + bld.SAMBA_BINARY('talloc_testsuite', + 'testsuite_main.c testsuite.c', + testsuite_deps, + install=False) + + bld.SAMBA_BINARY('talloc_test_magic_differs_helper', + 'test_magic_differs_helper.c', + 'talloc', install=False) + + else: + private_library = True + + if not bld.CONFIG_SET('USING_SYSTEM_TALLOC'): + + bld.SAMBA_LIBRARY('talloc', + 'talloc.c', + deps='replace', + provide_builtin_linking=True, + abi_directory='ABI', + abi_match='talloc* _talloc*', + hide_symbols=True, + vnum=VERSION, + public_headers=('' if private_library else 'talloc.h'), + pc_files='talloc.pc', + public_headers_install=not private_library, + private_library=private_library, + manpages='man/talloc.3') + + if not bld.CONFIG_SET('USING_SYSTEM_PYTALLOC_UTIL'): + name = bld.pyembed_libname('pytalloc-util') + + bld.SAMBA_LIBRARY(name, + source='pytalloc_util.c', + public_deps='talloc', + pyembed=True, + vnum=VERSION, + hide_symbols=True, + abi_directory='ABI', + abi_match='pytalloc_* _pytalloc_*', + private_library=private_library, + public_headers=('' if private_library else 'pytalloc.h'), + pc_files='pytalloc-util.pc', + enabled=bld.PYTHON_BUILD_IS_ENABLED() + ) + bld.SAMBA_PYTHON('pytalloc', + 'pytalloc.c', + deps='talloc ' + name, + enabled=bld.PYTHON_BUILD_IS_ENABLED(), + realname='talloc.so') + + bld.SAMBA_PYTHON('test_pytalloc', + 'test_pytalloc.c', + deps=name, + enabled=bld.PYTHON_BUILD_IS_ENABLED(), + realname='_test_pytalloc.so', + install=False) + + +def testonly(ctx): + '''run the talloc testsuite''' + import samba_utils + + samba_utils.ADD_LD_LIBRARY_PATH('bin/shared') + samba_utils.ADD_LD_LIBRARY_PATH('bin/shared/private') + + cmd = os.path.join(Context.g_module.out, 'talloc_testsuite') + ret = samba_utils.RUN_COMMAND(cmd) + print("testsuite returned %d" % ret) + magic_helper_cmd = os.path.join(Context.g_module.out, 'talloc_test_magic_differs_helper') + magic_cmd = os.path.join(Context.g_module.top, 'lib', 'talloc', + 'test_magic_differs.sh') + if not os.path.exists(magic_cmd): + magic_cmd = os.path.join(Context.g_module.top, 'test_magic_differs.sh') + + magic_ret = samba_utils.RUN_COMMAND(magic_cmd + " " + magic_helper_cmd) + print("magic differs test returned %d" % magic_ret) + pyret = samba_utils.RUN_PYTHON_TESTS(['test_pytalloc.py']) + print("python testsuite returned %d" % pyret) + sys.exit(ret or magic_ret or pyret) + +# Waf doesn't build the unit tests on its own, maybe because they don't link with talloc; +# this forces them to be built and run. +def test(ctx): + Options.commands.append('build') + Options.commands.append('testonly') + +def dist(): + '''makes a tarball for distribution''' + samba_dist.dist() + +def reconfigure(ctx): + '''reconfigure if config scripts have changed''' + samba_utils.reconfigure(ctx)
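
The xcode6 generator above is driven entirely by task-generator attributes: process_xcode() reads 'source', 'group_files', 'export_headers', 'install' and 'settings', while bind_fun() binds app, framework, dylib, shlib, stlib and program helpers onto BuildContext. A minimal wscript sketch of how a project might use it (the target and file names are hypothetical, and it assumes the module's configure hook populates env.PROJ_CONFIGURATION with defaults, as the error message in PBXProject.__init__ suggests):

def options(opt):
	opt.load('compiler_cxx')

def configure(conf):
	conf.load('compiler_cxx')
	conf.load('xcode6')	# assumed to install default PROJ_CONFIGURATION settings

def build(bld):
	# bld.framework() is one of the helpers bound by bind_fun() above
	bld.framework(
		source='src/core.cpp',				# becomes the 'Compile Sources' phase
		target='Core',
		group_files={'Engine': ['src/core.cpp']},	# optional custom UI grouping
		export_headers='include/core.h',		# packaged via PBXHeadersBuildPhase
		install='@executable_path/../Frameworks',	# adds a PBXCopyFilesBuildPhase
		settings={'Debug': {'ONLY_ACTIVE_ARCH': 'YES'}})	# per-configuration overrides

Running './waf configure xcode6' with such a wscript would then write <APPNAME>.xcodeproj/project.pbxproj into the build directory, as execute() above shows.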
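
processor.py above speaks a one-line-per-message worker protocol: the parent writes base64(pickle([cmd, kwargs, cargs])) to the worker's stdin, and the worker answers with base64(pickle([ret, out, err, ex, trace])). A standalone driver sketch, assuming a POSIX system with 'echo' on PATH and processor.py reachable at the path shown (both are illustrative assumptions):

import base64, pickle, subprocess, sys

# Spawn one worker process (the relative path is an assumption for illustration)
worker = subprocess.Popen([sys.executable, 'third_party/waf/waflib/processor.py'],
	stdin=subprocess.PIPE, stdout=subprocess.PIPE)

# Request: run ['echo', 'hello'] with stdout captured; no communicate() kwargs
request = [['echo', 'hello'], {'stdout': subprocess.PIPE}, None]
worker.stdin.write(base64.b64encode(pickle.dumps(request)) + b'\n')
worker.stdin.flush()

# Reply: [returncode, stdout, stderr, exception class name, traceback text]
ret, out, err, ex, trace = pickle.loads(base64.b64decode(worker.stdout.readline()))
print(ret, out)	# expected: 0 b'hello\n'

worker.stdin.close()	# EOF makes run() call sys.exit(1), terminating the worker
worker.wait()

Keeping each exchange to a single base64 line is what lets the parent multiplex many such workers over plain pipes with no framing beyond newlines.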