Inspired by this question, I was recently experimenting with other "creative" uses of macros. Here is what I have come up with so far: a set of macros that guarantee a parameter is of a particular "kind" of type. They can be used as building blocks inside other macros.
#include <stdio.h>
// Compile-time "type guard" macros. Each expands to an expression with the
// same value and type as its argument, but fails to compile when the
// argument is not of the expected kind — so they can be embedded in other
// macros as no-cost static checks.
// ACCEPT_PTR: only pointers can be dereferenced, so sizeof(*(x)) fails to
// compile if x is a numeric type (or void*, whose pointee has no size).
// The "* 0" keeps the value and type of (x) unchanged.
#define ACCEPT_PTR(x) \
((x) + sizeof(*(x)) * 0)
// ACCEPT_ARR: sizeof() is evaluated at compile time, and a named bit-field
// needs a constant, non-zero width — so the comparison below must be a
// compile-time constant that is true.
// (void*)&array == (void*)&array[0] is such a compile-time constant.
// (void*)&ptr == (void*)&ptr[0] needs the run-time value of ptr and
// therefore isn't a compile-time constant; likewise for a non-pointer
// scalar, where &(x)[0] doesn't compile at all.
#define ACCEPT_ARR(x) \
((x) + sizeof(struct { int not_an_array: ((void*)&(x) == &(x)[0]); }) * 0)
// Alternative form using an array type (size 0 is invalid) instead:
// ((x) + sizeof(char[(void*)&(x) == (void*)&(x)[0]]) * 0)
// ACCEPT_NUM: x can be multiplied (or added to itself) only if it's a
// numeric type; pointers can't be multiplied or added to pointers.
#define ACCEPT_NUM(x) \
((x) * 1)
// Alternative form:
// (((x) + (x)) - (x))
// ACCEPT_INT: only integers can be shifted (% likewise applies to
// integers only). This fails to compile if x isn't an integer.
#define ACCEPT_INT(x) \
((x) << 0)
// Alternative form using %:
// ((x) + (x) % 2 * 0)
// ACCEPT_STR: x is concatenated with "" during translation only if it's a
// string literal. Compilation fails if x isn't a string literal.
#define ACCEPT_STR(x) \
x ""
// ACCEPT_LVAL: & requires an lvalue, and *&(x) yields the same lvalue
// back, so this fails to compile when x is not an lvalue.
#define ACCEPT_LVAL(x) \
(*&(x))
// Compile-test harness: each uncommented call is a positive case that must
// compile; each commented-out call is a negative case the macro is meant
// to reject (uncomment one to see the compile error). Nothing is executed
// beyond no-op expressions; the program always returns 0.
int main(void)
{
// One sample object of each "kind" the guard macros distinguish.
int i = 42;
int* p = &i;
int a[1] = { 42 };
float f = 42.0;
// Numeric values pass, including dereferenced pointers/array elements;
// pointers, arrays and string literals are rejected.
ACCEPT_NUM(i);
ACCEPT_NUM(p[0]);
ACCEPT_NUM(a[0]);
// ACCEPT_NUM(p);
// ACCEPT_NUM(a);
// ACCEPT_NUM("42");
// Integer values pass; pointers, arrays, string literals and
// floating-point values are rejected.
ACCEPT_INT(i);
ACCEPT_INT(p[0]);
ACCEPT_INT(a[0]);
ACCEPT_INT("a"[0]);
// ACCEPT_INT(p);
// ACCEPT_INT(a);
// ACCEPT_INT("42");
// ACCEPT_INT(f);
// Anything dereferenceable passes: pointers, arrays (which decay to
// pointers) and string literals; plain scalars are rejected.
ACCEPT_PTR(&i);
ACCEPT_PTR(p);
ACCEPT_PTR(a);
ACCEPT_PTR(&a[0]);
ACCEPT_PTR("42");
ACCEPT_PTR(&"a"[0]);
// ACCEPT_PTR(i);
// ACCEPT_PTR(f);
// Array check: results vary by compiler, hence all cases disabled.
// ACCEPT_ARR(a); // doesn't compile with OW :(
// ACCEPT_ARR(i);
// ACCEPT_ARR(p);
// ACCEPT_ARR("42"); // WTF?; compiles with gcc :(
// ACCEPT_ARR(f);
// Only string literals pass.
ACCEPT_STR("42");
// ACCEPT_STR(i);
// ACCEPT_STR(p);
// ACCEPT_STR(a);
// ACCEPT_STR(f);
// Lvalues pass; constants (rvalues) are rejected.
ACCEPT_LVAL(i);
ACCEPT_LVAL(p);
ACCEPT_LVAL(p[0]);
ACCEPT_LVAL(a); // an lvalue, though not a modifiable one
ACCEPT_LVAL(a[0]);
// ACCEPT_LVAL("42"); // WTF?; compiles with gcc but not with OW :(
ACCEPT_LVAL(f);
// ACCEPT_LVAL(0);
// ACCEPT_LVAL(0.0);
// ACCEPT_LVAL('a');
return 0;
}