ALIGNMENT must be a power of 2. */
#define ALIGN(ptr, ALIGNMENT) \
  ((POINTER_TYPE *) ((((uintptr_t) (ptr)) + (ALIGNMENT) - 1) \
		     & ~((ALIGNMENT) - 1)))
/* Number of bytes to request from malloc for one struct ablocks:
   the full struct size minus BLOCK_PADDING.  */
#define ABLOCKS_BYTES (sizeof (struct ablocks) - BLOCK_PADDING)
/* Given a pointer BLOCK to an ablock, return the ablocks structure
   that contains it.  NOTE(review): the abase field of the first block
   appears to double as the busy counter (see the +2/-2 updates of
   ABLOCKS_BUSY below), so a small integer value (<= 1 + 2 *
   ABLOCKS_SIZE) means BLOCK is itself the base — confirm against the
   allocator's bookkeeping.  */
#define ABLOCK_ABASE(block) \
  (((uintptr_t) (block)->abase) <= (1 + 2 * ABLOCKS_SIZE) \
   ? (struct ablocks *)(block) \
   : (block)->abase)
/* Translate ABASE to the pointer originally returned by malloc.
   In this branch the two coincide — presumably the allocation is
   already naturally aligned; verify against the #if condition
   above this chunk.  */
#define ABLOCKS_BASE(abase) (abase)
#else
/* Translate ABASE to the pointer originally returned by malloc.
   The low bit of ABLOCKS_BUSY records whether the base came back
   aligned from malloc; when it did not, the original malloc pointer
   was apparently stored in the word just before the aligned base.  */
#define ABLOCKS_BASE(abase) \
  (1 & (intptr_t) ABLOCKS_BUSY (abase) ? abase : ((void**)abase)[-1])
#endif
/* The list of free ablock. */
if (!free_ablock)
{
int i;
- EMACS_INTPTR aligned; /* int gets warning casting to 64-bit pointer. */
+ intptr_t aligned; /* int gets warning casting to 64-bit pointer. */
#ifdef DOUG_LEA_MALLOC
/* Prevent mmap'ing the chunk. Lisp data may not be mmap'ed
}
ABLOCKS_BUSY (abase) = (struct ablocks *) aligned;
- eassert (0 == ((EMACS_UINTPTR) abase) % BLOCK_ALIGN);
+ eassert (0 == ((uintptr_t) abase) % BLOCK_ALIGN);
eassert (ABLOCK_ABASE (&abase->blocks[3]) == abase); /* 3 is arbitrary */
eassert (ABLOCK_ABASE (&abase->blocks[0]) == abase);
eassert (ABLOCKS_BASE (abase) == base);
- eassert (aligned == (EMACS_INTPTR) ABLOCKS_BUSY (abase));
+ eassert (aligned == (intptr_t) ABLOCKS_BUSY (abase));
}
abase = ABLOCK_ABASE (free_ablock);
ABLOCKS_BUSY (abase) =
- (struct ablocks *) (2 + (EMACS_INTPTR) ABLOCKS_BUSY (abase));
+ (struct ablocks *) (2 + (intptr_t) ABLOCKS_BUSY (abase));
val = free_ablock;
free_ablock = free_ablock->x.next_free;
if (!val && nbytes)
memory_full ();
- eassert (0 == ((EMACS_UINTPTR) val) % BLOCK_ALIGN);
+ eassert (0 == ((uintptr_t) val) % BLOCK_ALIGN);
return val;
}
free_ablock = ablock;
/* Update busy count. */
ABLOCKS_BUSY (abase) =
- (struct ablocks *) (-2 + (EMACS_INTPTR) ABLOCKS_BUSY (abase));
+ (struct ablocks *) (-2 + (intptr_t) ABLOCKS_BUSY (abase));
- if (2 > (EMACS_INTPTR) ABLOCKS_BUSY (abase))
+ if (2 > (intptr_t) ABLOCKS_BUSY (abase))
{ /* All the blocks are free. */
- int i = 0, aligned = (EMACS_INTPTR) ABLOCKS_BUSY (abase);
+ int i = 0, aligned = (intptr_t) ABLOCKS_BUSY (abase);
struct ablock **tem = &free_ablock;
struct ablock *atop = &abase->blocks[aligned ? ABLOCKS_SIZE : ABLOCKS_SIZE - 1];
eassert ((aligned & 1) == aligned);
eassert (i == (aligned ? ABLOCKS_SIZE : ABLOCKS_SIZE - 1));
#ifdef USE_POSIX_MEMALIGN
- eassert ((EMACS_UINTPTR) ABLOCKS_BASE (abase) % BLOCK_ALIGN == 0);
+ eassert ((uintptr_t) ABLOCKS_BASE (abase) % BLOCK_ALIGN == 0);
#endif
free (ABLOCKS_BASE (abase));
}
s = string_free_list;
while (s != NULL)
{
- if ((EMACS_UINTPTR) s < 1024)
+ if ((uintptr_t) s < 1024)
abort();
s = NEXT_FREE_LISP_STRING (s);
}
&= ~(1 << ((n) % (sizeof(int) * CHAR_BIT)))
/* Return the float_block that contains the Lisp_Float FPTR.  Blocks
   are BLOCK_ALIGN-aligned (asserted at allocation time), so masking
   off the low address bits yields the block header.  */
#define FLOAT_BLOCK(fptr) \
  ((struct float_block *) (((uintptr_t) (fptr)) & ~(BLOCK_ALIGN - 1)))
/* Index of the Lisp_Float FPTR within its containing float_block.  */
#define FLOAT_INDEX(fptr) \
  ((((uintptr_t) (fptr)) & (BLOCK_ALIGN - 1)) / sizeof (struct Lisp_Float))
struct float_block
{
/ (sizeof (struct Lisp_Cons) * CHAR_BIT + 1))
/* Return the cons_block that contains the Lisp_Cons FPTR.  Blocks
   are BLOCK_ALIGN-aligned, so masking off the low address bits
   yields the block header.  */
#define CONS_BLOCK(fptr) \
  ((struct cons_block *) ((uintptr_t) (fptr) & ~(BLOCK_ALIGN - 1)))
/* Index of the Lisp_Cons FPTR within its containing cons_block.  */
#define CONS_INDEX(fptr) \
  (((uintptr_t) (fptr) & (BLOCK_ALIGN - 1)) / sizeof (struct Lisp_Cons))
struct cons_block
{
struct mem_node *m;
/* Quickly rule out some values which can't point to Lisp data. */
- if ((EMACS_INTPTR) p %
+ if ((intptr_t) p %
#ifdef USE_LSB_TAG
8 /* USE_LSB_TAG needs Lisp data to be aligned on multiples of 8. */
#else
{
Lisp_Object end;
- XSETINT (end, (EMACS_INTPTR) (char *) sbrk (0) / 1024);
+ XSETINT (end, (intptr_t) (char *) sbrk (0) / 1024);
return end;
}