Commit 1c2f2a80 by Rémi Verschelde

typedefs: Cleanup unused macros and unnecessary checks

We now require a compiler with C++17 support, so we don't need to check for features added in GCC 5 or Clang 3.2. The Clang builtin availability checks were unused anyway, since Clang defines `__GNUC__` (it is also a GNU C implementation). Fixes #36986.
parent 5b97db32
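For background, a minimal illustrative sketch (not code from this commit; all names are made up) of why a single `#if defined(__GNUC__)` check covers Clang as well: Clang advertises itself as a GNU C implementation, so it defines `__GNUC__`/`__GNUC_MINOR__` and provides the same builtins the engine relies on.

#include <cstdio>

int main() {
	// On both GCC and Clang this branch is taken, because Clang defines
	// __GNUC__ in its default GNU mode.
#if defined(__GNUC__)
	std::printf("GNU C compatible compiler: %d.%d\n", __GNUC__, __GNUC_MINOR__);
#if defined(__clang__)
	// Clang additionally defines its own macros, if the two ever need to be told apart.
	std::printf("...which is actually Clang %d.%d\n", __clang_major__, __clang_minor__);
#endif
#else
	std::printf("Not GNU C compatible (e.g. MSVC), fallbacks are used.\n");
#endif
	return 0;
}

Note that clang-cl (Clang in MSVC compatibility mode) does not define `__GNUC__` by default, which is fine here since that configuration is treated like MSVC.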
@@ -82,24 +82,25 @@ private:
 	}
 	_FORCE_INLINE_ size_t _get_alloc_size(size_t p_elements) const {
-		//return nearest_power_of_2_templated(p_elements*sizeof(T)+sizeof(SafeRefCount)+sizeof(int));
 		return next_power_of_2(p_elements * sizeof(T));
 	}
 	_FORCE_INLINE_ bool _get_alloc_size_checked(size_t p_elements, size_t *out) const {
-#if defined(_add_overflow) && defined(_mul_overflow)
+#if defined(__GNUC__)
 		size_t o;
 		size_t p;
-		if (_mul_overflow(p_elements, sizeof(T), &o)) {
+		if (__builtin_mul_overflow(p_elements, sizeof(T), &o)) {
 			*out = 0;
 			return false;
 		}
 		*out = next_power_of_2(o);
-		if (_add_overflow(o, static_cast<size_t>(32), &p)) return false; //no longer allocated here
+		if (__builtin_add_overflow(o, static_cast<size_t>(32), &p)) {
+			return false; // No longer allocated here.
+		}
 		return true;
 #else
 		// Speed is more important than correctness here, do the operations unchecked
-		// and hope the best
+		// and hope for the best.
 		*out = _get_alloc_size(p_elements);
 		return true;
 #endif
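As a standalone usage sketch of the checked branch above (illustrative only; the values and the `main()` harness are assumptions, not code from the commit), `__builtin_mul_overflow` reports the overflow instead of letting the size silently wrap:

#include <cstddef>
#include <cstdint>
#include <cstdio>

int main() {
#if defined(__GNUC__)
	size_t bytes = 0;
	const size_t elements = SIZE_MAX / 2; // Absurdly large element count.
	if (__builtin_mul_overflow(elements, sizeof(uint64_t), &bytes)) {
		// bytes holds only the truncated product; the caller should refuse to allocate.
		std::printf("overflow detected, allocation refused\n");
	}
#endif
	return 0;
}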
@@ -502,11 +502,11 @@ void _err_print_index_error(const char *p_function, const char *p_file, int p_li
  *
  * The current function returns `m_retval`.
  */
 #define ERR_FAIL_V(m_retval) \
 	if (1) { \
-		_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " __STR(m_retval)); \
+		_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " _STR(m_retval)); \
 		return m_retval; \
 	} else \
 		((void)0)
 /**
@@ -515,11 +515,11 @@ void _err_print_index_error(const char *p_function, const char *p_file, int p_li
  *
  * Prints `m_msg`, and the current function returns `m_retval`.
  */
 #define ERR_FAIL_V_MSG(m_retval, m_msg) \
 	if (1) { \
-		_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " __STR(m_retval), DEBUG_STR(m_msg)); \
+		_err_print_error(FUNCTION_STR, __FILE__, __LINE__, "Method/Function Failed, returning: " _STR(m_retval), DEBUG_STR(m_msg)); \
 		return m_retval; \
 	} else \
 		((void)0)
 /**
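A side note on the macro shape kept by this change (a reduced sketch with made-up names, not the engine's actual error machinery): the `if (1) { ... } else ((void)0)` wrapper makes the macro expand to a single statement, and the commit itself only swaps the stringification helper from the removed `__STR` to the existing `_STR`.

#include <cstdio>

// Made-up reduced versions of the pattern, for illustration only.
#define MY_STR(m_x) #m_x

#define FAIL_V(m_retval) \
	if (1) { \
		std::fprintf(stderr, "failed, returning: %s\n", MY_STR(m_retval)); \
		return m_retval; \
	} else \
		((void)0)

int checked_div(int a, int b) {
	if (b == 0)
		FAIL_V(-1); // Expands safely even inside an unbraced if.
	return a / b;
}

The trailing `else ((void)0)` consumes the semicolon the caller writes, so the macro behaves as one statement and composes correctly with a surrounding `if`/`else`.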
@@ -37,10 +37,10 @@
 #include "thirdparty/misc/pcg.h"
-#if defined(__GNUC__) || (_llvm_has_builtin(__builtin_clz))
+#if defined(__GNUC__)
 #define CLZ32(x) __builtin_clz(x)
 #elif defined(_MSC_VER)
-#include "intrin.h"
+#include <intrin.h>
 static int __bsr_clz32(uint32_t x) {
 	unsigned long index;
 	_BitScanReverse(&index, x);
@@ -50,11 +50,11 @@ static int __bsr_clz32(uint32_t x) {
 #else
 #endif
-#if defined(__GNUC__) || (_llvm_has_builtin(__builtin_ldexp) && _llvm_has_builtin(__builtin_ldexpf))
+#if defined(__GNUC__)
 #define LDEXP(s, e) __builtin_ldexp(s, e)
 #define LDEXPF(s, e) __builtin_ldexpf(s, e)
 #else
-#include "math.h"
+#include <math.h>
 #define LDEXP(s, e) ldexp(s, e)
 #define LDEXPF(s, e) ldexp(s, e)
 #endif
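For reference, a small self-contained sketch of what `CLZ32` computes (assumed wrapper and values, not code from this file): the count of leading zero bits of a 32-bit value. `__builtin_clz(0)` is undefined, and the MSVC `_BitScanReverse` path shown above has the same restriction, so callers must not pass zero.

#include <cstdint>
#include <cstdio>

// Hypothetical portable stand-in for the CLZ32 macro above (the real MSVC
// branch uses _BitScanReverse instead of this loop).
static int clz32(uint32_t x) {
#if defined(__GNUC__)
	return __builtin_clz(x); // Undefined behaviour for x == 0.
#else
	int n = 0;
	for (uint32_t mask = 0x80000000u; mask != 0u && (x & mask) == 0u; mask >>= 1) {
		n++;
	}
	return n;
#endif
}

int main() {
	std::printf("%d\n", clz32(1u));      // 31
	std::printf("%d\n", clz32(0x8000u)); // 16
	return 0;
}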
@@ -37,60 +37,41 @@
  * Basic definitions and simple functions to be used everywhere.
  */
+// Include first in case the platform needs to pre-define/include some things.
 #include "platform_config.h"
+// Should be available everywhere.
+#include "core/error_list.h"
+#include "core/int_types.h"
+// Turn argument to string constant:
+// https://gcc.gnu.org/onlinedocs/cpp/Stringizing.html#Stringizing
 #ifndef _STR
 #define _STR(m_x) #m_x
 #define _MKSTR(m_x) _STR(m_x)
 #endif
-//should always inline no matter what
+// Should always inline no matter what.
 #ifndef _ALWAYS_INLINE_
-#if defined(__GNUC__) && (__GNUC__ >= 4)
-#define _ALWAYS_INLINE_ __attribute__((always_inline)) inline
-#elif defined(__llvm__)
+#if defined(__GNUC__)
 #define _ALWAYS_INLINE_ __attribute__((always_inline)) inline
 #elif defined(_MSC_VER)
 #define _ALWAYS_INLINE_ __forceinline
 #else
 #define _ALWAYS_INLINE_ inline
 #endif
 #endif
-//should always inline, except in some cases because it makes debugging harder
+// Should always inline, except in debug builds because it makes debugging harder.
 #ifndef _FORCE_INLINE_
 #ifdef DISABLE_FORCED_INLINE
 #define _FORCE_INLINE_ inline
 #else
 #define _FORCE_INLINE_ _ALWAYS_INLINE_
 #endif
 #endif
-//custom, gcc-safe offsetof, because gcc complains a lot.
-template <class T>
-T *_nullptr() {
-	T *t = NULL;
-	return t;
-}
-#define OFFSET_OF(st, m) \
-	((size_t)((char *)&(_nullptr<st>()->m) - (char *)0))
-/**
- * Some platforms (devices) don't define NULL
- */
-#ifndef NULL
-#define NULL 0
-#endif
-/**
- * Windows badly defines a lot of stuff we'll never use. Undefine it.
- */
+// Windows badly defines a lot of stuff we'll never use. Undefine it.
 #ifdef _WIN32
 #undef min // override standard definition
 #undef max // override standard definition
@@ -105,18 +86,11 @@ T *_nullptr() {
 #undef CONNECT_DEFERRED // override from Windows SDK, clashes with Object enum
 #endif
-#include "core/int_types.h"
-#include "core/error_list.h"
-/** Generic ABS function, for math uses please use Math::abs */
+// Generic ABS function, for math uses please use Math::abs.
 #ifndef ABS
 #define ABS(m_v) (((m_v) < 0) ? (-(m_v)) : (m_v))
 #endif
-#define ABSDIFF(x, y) (((x) < (y)) ? ((y) - (x)) : ((x) - (y)))
 #ifndef SGN
 #define SGN(m_v) (((m_v) < 0) ? (-1.0) : (+1.0))
 #endif
@@ -133,49 +107,24 @@ T *_nullptr() {
 #define CLAMP(m_a, m_min, m_max) (((m_a) < (m_min)) ? (m_min) : (((m_a) > (m_max)) ? m_max : m_a))
 #endif
-/** Generic swap template */
+// Generic swap template.
 #ifndef SWAP
 #define SWAP(m_x, m_y) __swap_tmpl((m_x), (m_y))
 template <class T>
 inline void __swap_tmpl(T &x, T &y) {
 	T aux = x;
 	x = y;
 	y = aux;
 }
-#endif //swap
-/* clang-format off */
-#define HEX2CHR(m_hex) \
-	((m_hex >= '0' && m_hex <= '9') ? (m_hex - '0') : \
-	((m_hex >= 'A' && m_hex <= 'F') ? (10 + m_hex - 'A') : \
-	((m_hex >= 'a' && m_hex <= 'f') ? (10 + m_hex - 'a') : 0)))
-/* clang-format on */
-// Macro to check whether we are compiled by clang
-// and we have a specific builtin
-#if defined(__llvm__) && defined(__has_builtin)
-#define _llvm_has_builtin(x) __has_builtin(x)
-#else
-#define _llvm_has_builtin(x) 0
-#endif
-#if (defined(__GNUC__) && (__GNUC__ >= 5)) || _llvm_has_builtin(__builtin_mul_overflow)
-#define _mul_overflow __builtin_mul_overflow
-#endif
-#if (defined(__GNUC__) && (__GNUC__ >= 5)) || _llvm_has_builtin(__builtin_add_overflow)
-#define _add_overflow __builtin_add_overflow
-#endif
-/** Function to find the next power of 2 to an integer */
+#endif // SWAP
+/* Functions to handle powers of 2 and shifting. */
+// Function to find the next power of 2 to an integer.
 static _FORCE_INLINE_ unsigned int next_power_of_2(unsigned int x) {
-	if (x == 0)
+	if (x == 0) {
 		return 0;
+	}
 	--x;
 	x |= x >> 1;
@@ -187,8 +136,8 @@ static _FORCE_INLINE_ unsigned int next_power_of_2(unsigned int x) {
 	return ++x;
 }
+// Function to find the previous power of 2 to an integer.
 static _FORCE_INLINE_ unsigned int previous_power_of_2(unsigned int x) {
 	x |= x >> 1;
 	x |= x >> 2;
 	x |= x >> 4;
@@ -197,40 +146,45 @@ static _FORCE_INLINE_ unsigned int previous_power_of_2(unsigned int x) {
 	return x - (x >> 1);
 }
+// Function to find the closest power of 2 to an integer.
 static _FORCE_INLINE_ unsigned int closest_power_of_2(unsigned int x) {
 	unsigned int nx = next_power_of_2(x);
 	unsigned int px = previous_power_of_2(x);
 	return (nx - x) > (x - px) ? px : nx;
 }
-// We need this definition inside the function below.
-static inline int get_shift_from_power_of_2(unsigned int p_pixel);
+// Get a shift value from a power of 2.
+static inline int get_shift_from_power_of_2(unsigned int p_bits) {
+	for (unsigned int i = 0; i < 32; i++) {
+		if (p_bits == (unsigned int)(1 << i)) {
+			return i;
+		}
+	}
+	return -1;
+}
 template <class T>
 static _FORCE_INLINE_ T nearest_power_of_2_templated(T x) {
 	--x;
 	// The number of operations on x is the base two logarithm
-	// of the p_number of bits in the type. Add three to account
+	// of the number of bits in the type. Add three to account
 	// for sizeof(T) being in bytes.
 	size_t num = get_shift_from_power_of_2(sizeof(T)) + 3;
-	// If the compiler is smart, it unrolls this loop
-	// If its dumb, this is a bit slow.
-	for (size_t i = 0; i < num; i++)
+	// If the compiler is smart, it unrolls this loop.
+	// If it's dumb, this is a bit slow.
+	for (size_t i = 0; i < num; i++) {
 		x |= x >> (1 << i);
+	}
 	return ++x;
 }
-/** Function to find the nearest (bigger) power of 2 to an integer */
+// Function to find the nearest (bigger) power of 2 to an integer.
 static inline unsigned int nearest_shift(unsigned int p_number) {
 	for (int i = 30; i >= 0; i--) {
 		if (p_number & (1 << i))
 			return i + 1;
 	}
@@ -238,41 +192,20 @@ static inline unsigned int nearest_shift(unsigned int p_number) {
 	return 0;
 }
-/** get a shift value from a power of 2 */
-static inline int get_shift_from_power_of_2(unsigned int p_pixel) {
-	// return a GL_TEXTURE_SIZE_ENUM
-	for (unsigned int i = 0; i < 32; i++) {
-		if (p_pixel == (unsigned int)(1 << i))
-			return i;
-	}
-	return -1;
-}
-/** Swap 16 bits value for endianness */
-#if defined(__GNUC__) || _llvm_has_builtin(__builtin_bswap16)
+// Swap 16, 32 and 64 bits value for endianness.
+#if defined(__GNUC__)
 #define BSWAP16(x) __builtin_bswap16(x)
+#define BSWAP32(x) __builtin_bswap32(x)
+#define BSWAP64(x) __builtin_bswap64(x)
 #else
 static inline uint16_t BSWAP16(uint16_t x) {
 	return (x >> 8) | (x << 8);
 }
-#endif
-/** Swap 32 bits value for endianness */
-#if defined(__GNUC__) || _llvm_has_builtin(__builtin_bswap32)
-#define BSWAP32(x) __builtin_bswap32(x)
-#else
 static inline uint32_t BSWAP32(uint32_t x) {
 	return ((x << 24) | ((x << 8) & 0x00FF0000) | ((x >> 8) & 0x0000FF00) | (x >> 24));
 }
-#endif
-/** Swap 64 bits value for endianness */
-#if defined(__GNUC__) || _llvm_has_builtin(__builtin_bswap64)
-#define BSWAP64(x) __builtin_bswap64(x)
-#else
 static inline uint64_t BSWAP64(uint64_t x) {
 	x = (x & 0x00000000FFFFFFFF) << 32 | (x & 0xFFFFFFFF00000000) >> 32;
 	x = (x & 0x0000FFFF0000FFFF) << 16 | (x & 0xFFFF0000FFFF0000) >> 16;
@@ -281,40 +214,24 @@ static inline uint64_t BSWAP64(uint64_t x) {
 }
 #endif
-/** When compiling with RTTI, we can add an "extra"
- * layer of safeness in many operations, so dynamic_cast
- * is used besides casting by enum.
- */
+// Generic comparator used in Map, List, etc.
 template <class T>
 struct Comparator {
 	_ALWAYS_INLINE_ bool operator()(const T &p_a, const T &p_b) const { return (p_a < p_b); }
 };
+// Global lock macro, relies on the static Mutex::_global_mutex.
 void _global_lock();
 void _global_unlock();
 struct _GlobalLock {
 	_GlobalLock() { _global_lock(); }
 	~_GlobalLock() { _global_unlock(); }
 };
 #define GLOBAL_LOCK_FUNCTION _GlobalLock _global_lock_;
-#ifdef NO_SAFE_CAST
-#define SAFE_CAST static_cast
-#else
-#define SAFE_CAST dynamic_cast
-#endif
-#define MT_SAFE
-#define __STRX(m_index) #m_index
-#define __STR(m_index) __STRX(m_index)
-#ifdef __GNUC__
+#if defined(__GNUC__)
 #define likely(x) __builtin_expect(!!(x), 1)
 #define unlikely(x) __builtin_expect(!!(x), 0)
 #else
@@ -330,14 +247,12 @@ struct _GlobalLock {
 #define _PRINTF_FORMAT_ATTRIBUTE_2_3
 #endif
-/** This is needed due to a strange OpenGL API that expects a pointer
- * type for an argument that is actually an offset.
- */
+// This is needed due to a strange OpenGL API that expects a pointer
+// type for an argument that is actually an offset.
 #define CAST_INT_TO_UCHAR_PTR(ptr) ((uint8_t *)(uintptr_t)(ptr))
 // Home-made index sequence trick, so it can be used everywhere without the costly include of std::tuple.
 // https://stackoverflow.com/questions/15014096/c-index-of-type-during-variadic-template-expansion
 template <size_t... Is>
 struct IndexSequence {};
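A quick worked example of the two power-of-2 helpers touched above (standalone copies with assumed values, not code from the commit): `get_shift_from_power_of_2` is now defined before `nearest_power_of_2_templated`, which uses it to compute how many `x |= x >> (1 << i)` rounds are needed for the type's bit width.

#include <cassert>
#include <cstdint>

// Hypothetical standalone copies of the two helpers, for illustration only.
static int shift_from_pow2(unsigned int p_bits) {
	for (unsigned int i = 0; i < 32; i++) {
		if (p_bits == (unsigned int)(1 << i)) {
			return i;
		}
	}
	return -1;
}

template <class T>
static T nearest_pow2(T x) {
	--x;
	// sizeof(T) bytes -> shift gives log2(bytes), +3 gives log2(bit count).
	int num = shift_from_pow2(sizeof(T)) + 3;
	for (int i = 0; i < num; i++) {
		x |= x >> (1 << i);
	}
	return ++x;
}

int main() {
	assert(shift_from_pow2(8) == 3);                     // 2^3 == 8
	assert(nearest_pow2<uint32_t>(17u) == 32u);          // 5 passes for 32-bit types
	assert(nearest_pow2<uint64_t>(4097ull) == 8192ull);  // 6 passes for 64-bit types
	return 0;
}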
@@ -2194,7 +2194,11 @@ void Collada::_merge_skeletons(VisualScene *p_vscene, Node *p_node) {
 			ERR_CONTINUE(!state.scene_map.has(nodeid)); //weird, it should have it...
-			NodeJoint *nj = SAFE_CAST<NodeJoint *>(state.scene_map[nodeid]);
+#ifdef NO_SAFE_CAST
+			NodeJoint *nj = static_cast<NodeJoint *>(state.scene_map[nodeid]);
+#else
+			NodeJoint *nj = dynamic_cast<NodeJoint *>(state.scene_map[nodeid]);
+#endif
 			ERR_CONTINUE(!nj); //broken collada
 			ERR_CONTINUE(!nj->owner); //weird, node should have a skeleton owner
@@ -2366,7 +2370,11 @@ bool Collada::_move_geometry_to_skeletons(VisualScene *p_vscene, Node *p_node, L
 		String nodeid = ng->skeletons[0];
 		ERR_FAIL_COND_V(!state.scene_map.has(nodeid), false); //weird, it should have it...
-		NodeJoint *nj = SAFE_CAST<NodeJoint *>(state.scene_map[nodeid]);
+#ifdef NO_SAFE_CAST
+		NodeJoint *nj = static_cast<NodeJoint *>(state.scene_map[nodeid]);
+#else
+		NodeJoint *nj = dynamic_cast<NodeJoint *>(state.scene_map[nodeid]);
+#endif
 		ERR_FAIL_COND_V(!nj, false);
 		ERR_FAIL_COND_V(!nj->owner, false); //weird, node should have a skeleton owner
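Since the generic `SAFE_CAST` macro is gone from typedefs.h, the choice is now spelled out at both call sites. A minimal sketch of the behavioural difference (hypothetical types and `main()`, not from the engine): with RTTI available, `dynamic_cast` returns `nullptr` on a type mismatch, which is what the `ERR_CONTINUE`/`ERR_FAIL_COND_V` checks right after the cast rely on; `static_cast` would silently produce a bogus pointer.

#include <cassert>

struct Node { virtual ~Node() {} };
struct NodeJoint : Node { int bone = 0; };

// Illustration of the two branches above, with made-up helper names.
NodeJoint *as_joint(Node *p_node) {
#ifdef NO_SAFE_CAST
	return static_cast<NodeJoint *>(p_node); // Trusted cast, no RTTI needed.
#else
	return dynamic_cast<NodeJoint *>(p_node); // nullptr if p_node is not a NodeJoint.
#endif
}

int main() {
	Node plain;
	NodeJoint joint;
	assert(as_joint(&joint) != nullptr);
#ifndef NO_SAFE_CAST
	assert(as_joint(&plain) == nullptr); // Mismatch is caught instead of mis-cast.
#endif
	return 0;
}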
@@ -2012,8 +2012,10 @@ void OS_X11::handle_key_event(XKeyEvent *p_event, bool p_echo) {
 			// is correct, but the xorg developers are
 			// not very helpful today.
-			::Time tresh = ABSDIFF(peek_event.xkey.time, xkeyevent->time);
-			if (peek_event.type == KeyPress && tresh < 5) {
+#define ABSDIFF(x, y) (((x) < (y)) ? ((y) - (x)) : ((x) - (y)))
+			::Time threshold = ABSDIFF(peek_event.xkey.time, xkeyevent->time);
+#undef ABSDIFF
+			if (peek_event.type == KeyPress && threshold < 5) {
 				KeySym rk;
 				XLookupString((XKeyEvent *)&peek_event, str, 256, &rk, NULL);
 				if (rk == keysym_keycode) {