rename __GNU_INLINE__ to __GNU_INLINE_X86_32__

There is not only x86 in this world.
Simple sed job.
Author: notaz
Date:   2016-04-24 17:38:22 +03:00
Parent: 056d77f479
Commit: 4b5cc03bf0
18 changed files with 67 additions and 67 deletions
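The rename is not cosmetic: every block guarded by this macro is GCC-style inline assembly that names 32-bit x86 instructions and registers (rdtsc, pushl %%ebx, roll %%cl, %%eax, rep movsd, and so on), while the old header simply defined __GNU_INLINE__ for any GCC target, as the third changed file below shows. The sketch that follows illustrates the resulting guard pattern; aside from the __GNU_INLINE_X86_32__ name itself, the file name, the RotateLeft32 helper and the exact #if condition are illustrative assumptions, not the engine's actual headers.

/* sketch.h -- illustrative only: deriving an "inline asm only on 32-bit x86
 * GCC" guard, mirroring the split this commit makes explicit. */
#include <stdint.h>

#if defined(__GNUC__) && defined(__i386__) && !defined(__GNU_INLINE_X86_32__)
  #define __GNU_INLINE_X86_32__   /* GCC-style asm AND 32-bit x86 */
#endif

static inline uint32_t RotateLeft32(uint32_t ul, int bits)
{
#if defined(__GNU_INLINE_X86_32__)
  /* 32-bit x86 only: the asm names eax/cl explicitly */
  __asm__ __volatile__ ("roll %%cl, %%eax"
                        : "=a" (ul)
                        : "a" (ul), "c" (bits)
                        : "cc");
  return ul;
#else
  /* portable path for every other compiler or architecture */
  return (ul << bits) | (ul >> (-bits & 31));
#endif
}

In the engine itself the non-asm branches are USE_PORTABLE_C code, STUBBED placeholders, or #error directives, as the hunks below show; the macro's new suffix keeps the 32-bit x86 restriction visible at each of those decision points.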

View File

@@ -47,7 +47,7 @@ static inline __int64 ReadTSC_profile(void)
}
return mmRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__int64 mmRet;
__asm__ __volatile__ (
"rdtsc \n\t"

View File

@@ -64,7 +64,7 @@ static inline __int64 ReadTSC(void)
}
return mmRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__int64 mmRet;
__asm__ __volatile__ (
"rdtsc \n\t"

View File

@@ -134,21 +134,21 @@ MY_STATIC_ASSERT(size_tSize, sizeof(size_t) == sizeof(void*));
#endif
#endif
-#if ((defined __GNUC__) && (!defined __GNU_INLINE__))
-#define __GNU_INLINE__
+#if ((defined __GNUC__) && (!defined __GNU_INLINE_X86__))
+#define __GNU_INLINE_X86__
#endif
#if (defined __INTEL_COMPILER)
-#if ((!defined __GNU_INLINE__) && (!defined __MSVC_INLINE__))
-#error Please define __GNU_INLINE__ or __MSVC_INLINE__ with Intel C++.
+#if ((!defined __GNU_INLINE_X86__) && (!defined __MSVC_INLINE__))
+#error Please define __GNU_INLINE_X86__ or __MSVC_INLINE__ with Intel C++.
#endif
-#if ((defined __GNU_INLINE__) && (defined __MSVC_INLINE__))
-#error Define either __GNU_INLINE__ or __MSVC_INLINE__ with Intel C++.
+#if ((defined __GNU_INLINE_X86__) && (defined __MSVC_INLINE__))
+#error Define either __GNU_INLINE_X86__ or __MSVC_INLINE__ with Intel C++.
#endif
#endif
-#if defined(__GNU_INLINE__) && defined(__i386__)
+#if defined(__GNU_INLINE_X86__) && defined(__i386__)
#define FPU_REGS "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)"
#define MMX_REGS "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"
#endif
@@ -233,7 +233,7 @@ MY_STATIC_ASSERT(size_tSize, sizeof(size_t) == sizeof(void*));
#if (defined USE_PORTABLE_C)
// DG: according to http://blog.regehr.org/archives/1063 this is fast
return (ul<<bits) | (ul>>(-bits&31));
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// This, on the other hand, is wicked fast. :)
__asm__ __volatile__ (
"roll %%cl, %%eax \n\t"

View File

@@ -148,7 +148,7 @@ static void DetectCPU(void)
mov dword ptr [ulFeatures], edx
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// test MMX presence and update flag
__asm__ __volatile__ (
"pushl %%ebx \n\t"

View File

@@ -347,7 +347,7 @@ COLOR MulColors( COLOR col1, COLOR col2)
}
return colRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
COLOR colRet;
__asm__ __volatile__ (
"pushl %%ebx \n\t"
@@ -535,7 +535,7 @@ COLOR AddColors( COLOR col1, COLOR col2)
mov D [colRet],ebx
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp;
__asm__ __volatile__ (
// if xbx is "r", gcc runs out of regs in -fPIC + -fno-omit-fp :(
@@ -679,7 +679,7 @@ colSkip2:
colSkip1:
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
STUBBED("convert to inline asm.");
#else

View File

@@ -225,7 +225,7 @@ __forceinline ULONG ByteSwap( ULONG ul)
}
return ulRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"bswapl %%eax \n\t"
: "=a" (ul)
@@ -252,7 +252,7 @@ __forceinline ULONG rgba2argb( ULONG ul)
}
return ulRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG ulRet;
__asm__ __volatile__ (
"rorl $8, %%eax \n\t"
@@ -287,7 +287,7 @@ __forceinline ULONG abgr2argb( COLOR col)
}
return ulRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG ulRet;
__asm__ __volatile__ (
"bswapl %%eax \n\t"
@@ -323,7 +323,7 @@ inline void CopyLongs( ULONG *pulSrc, ULONG *pulDst, INDEX ctLongs)
rep movsd
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// I haven't benchmarked it, but in many cases, memcpy() becomes an
// inline (asm?) macro on GNU platforms, so this might not be a
// speed gain at all over the USE_PORTABLE_C version.
@@ -359,7 +359,7 @@ inline void StoreLongs( ULONG ulVal, ULONG *pulDst, INDEX ctLongs)
rep stosd
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"cld \n\t"
"rep \n\t"

View File

@@ -42,7 +42,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
#define ASMOPT 0
#elif (defined __MSVC_INLINE__)
#define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
#define ASMOPT 1
#else
#define ASMOPT 0
@@ -184,7 +184,7 @@ elemRest:
mov D [edi],eax
elemDone:
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"movl %[ctElems], %%ecx \n\t"
"movl %[piDst], %%edi \n\t"
@@ -505,7 +505,7 @@ static void RSBinToGroups( ScenePolygon *pspoFirst)
mov D [_ctGroupsCount],eax
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"movl $2, %%eax \n\t"
"bsrl (%%esi), %%ecx \n\t"
@@ -860,7 +860,7 @@ static void RSSetTextureCoords( ScenePolygon *pspoGroup, INDEX iLayer, INDEX iUn
// !!! FIXME: rcg11232001 This inline conversion is broken. Use the
// !!! FIXME: rcg11232001 C version for now with GCC.
-#if ((ASMOPT == 1) && (!defined __GNU_INLINE__) && (!defined __INTEL_COMPILER))
+#if ((ASMOPT == 1) && (!defined __GNU_INLINE_X86_32__) && (!defined __INTEL_COMPILER))
#if (defined __MSVC_INLINE__)
__asm {
mov esi,D [pspo]
@@ -915,7 +915,7 @@ vtxLoop:
/*
// !!! FIXME: rcg11232001 This inline conversion is broken. Use the
// !!! FIXME: rcg11232001 C version for now on Linux.
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
STUBBED("debug this");
__asm__ __volatile__ (
"0: \n\t" // vtxLoop

View File

@@ -95,7 +95,7 @@ pixLoop:
jnz pixLoop
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"movl %[pubTexture], %%esi \n\t"
"movl %[pixTextureSize], %%ecx \n\t"

View File

@@ -216,7 +216,7 @@ void UploadTexture_OGL( ULONG *pulTexture, PIX pixSizeU, PIX pixSizeV,
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"pxor %%mm0,%%mm0 \n\t"
"movl %[pulSrc],%%esi \n\t"

View File

@@ -297,7 +297,7 @@ pixLoopN:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"pxor %%mm0, %%mm0 \n\t"
"movl %[pulSrcMipmap], %%esi \n\t"
@@ -428,7 +428,7 @@ halfEnd:
fullEnd:
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp, tmp2;
__asm__ __volatile__ (
"xorl %[xbx], %[xbx] \n\t"
@@ -852,7 +852,7 @@ nextRowO:
emms;
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp;
__asm__ __volatile__ (
"movl %[pulSrc], %%esi \n\t"
@@ -1046,7 +1046,7 @@ allDoneE:
emms;
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"pxor %%mm0, %%mm0 \n\t"
"movl %[pulDst], %%esi \n\t"
@@ -2204,7 +2204,7 @@ lowerLoop:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
FB_pulSrc = pulSrc;
FB_pulDst = pulDst;

View File

@@ -102,7 +102,7 @@ inline void glCOLOR( COLOR col)
mov dword ptr [col],eax
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"bswapl %%eax \n\t"
: "=a" (col)

View File

@@ -36,7 +36,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
#define ASMOPT 0
#elif (defined __MSVC_INLINE__)
#define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
#define ASMOPT 1
#else
#define ASMOPT 0
@@ -1357,7 +1357,7 @@ pixLoop:
pop ebx
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// rcg12152001 needed extra registers. :(
_slHeightMapStep_renderWater = slHeightMapStep;
_pixBaseWidth_renderWater = pixBaseWidth;
@@ -1626,7 +1626,7 @@ pixLoop2:
pop ebx
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"bsfl %[pixBaseWidth], %%eax \n\t"
"movl $32, %%edx \n\t"
@@ -2146,7 +2146,7 @@ pixLoop4:
pop ebx
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"bsfl %[pixBaseWidth], %%eax \n\t"
"movl $32, %%edx \n\t"
@@ -2976,7 +2976,7 @@ pixDone:
pop ebx
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"movl %[slColumnModulo], %%edx \n\t"
"movl %[slBufferMask], %%ecx \n\t"
@@ -3119,7 +3119,7 @@ pixLoopF:
jnz rowLoopF
pop ebx
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
_pubHeat_RenderPlasmaFire = pubHeat; // ran out of registers. :/
__asm__ __volatile__ (
"movl %[slHeatRowStep], %%eax \n\t"

View File

@@ -44,7 +44,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
#define ASMOPT 0
#elif (defined __MSVC_INLINE__)
#define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
#define ASMOPT 1
#else
#define ASMOPT 0
@@ -364,7 +364,7 @@ skipPixel:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp1, tmp2;
__asm__ __volatile__ (
// prepare interpolants
@@ -576,7 +576,7 @@ skipPixel:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp1, tmp2;
__asm__ __volatile__ (
// prepare interpolants
@@ -796,7 +796,7 @@ skipPixel:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp1, tmp2;
__asm__ __volatile__ (
// prepare interpolants
@@ -1008,7 +1008,7 @@ skipPixel:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp1, tmp2;
__asm__ __volatile__ (
// prepare interpolants
@@ -1201,7 +1201,7 @@ BOOL CLayerMixer::PrepareOneLayerPoint( CBrushShadowLayer *pbsl, BOOL bNoMask)
FLOAT fDL2oDV = fDDL2oDV + 2*(lm_vStepV%v00);
//_v00 = v00;
-#if ((ASMOPT == 1) && (!defined __GNU_INLINE__))
+#if ((ASMOPT == 1) && (!defined __GNU_INLINE_X86_32__))
#if (defined __MSVC_INLINE__)
__asm {
fld D [fDDL2oDU]
@@ -1230,7 +1230,7 @@ BOOL CLayerMixer::PrepareOneLayerPoint( CBrushShadowLayer *pbsl, BOOL bNoMask)
fistp D [_slDDL2oDV]
fistp D [_slDDL2oDU]
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
STUBBED("inline asm.");
#else
#error Please write inline assembly for your platform.
@@ -1321,7 +1321,7 @@ void CLayerMixer::AddOneLayerGradient( CGradientParameters &gp)
_pulLayer = lm_pulShadowMap;
FLOAT fStart = Clamp( fGr00-(fDGroDJ+fDGroDI)*0.5f, 0.0f, 1.0f);
-#if ((ASMOPT == 1) && (!defined __GNU_INLINE__))
+#if ((ASMOPT == 1) && (!defined __GNU_INLINE_X86_32__))
#if (defined __MSVC_INLINE__)
__int64 mmRowAdv;
SLONG fixGRow = (fGr00-(fDGroDJ+fDGroDI)*0.5f)*32767.0f; // 16:15
@@ -1436,7 +1436,7 @@ rowNext:
rowDone:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
STUBBED("WRITE ME. Argh.");
@@ -1565,7 +1565,7 @@ rowNext:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp;
__asm__ __volatile__ (
// prepare pointers and variables
@@ -1665,7 +1665,7 @@ skipLight:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG tmp;
__asm__ __volatile__ (
// prepare pointers and variables
@@ -1862,7 +1862,7 @@ void CLayerMixer::MixOneMipmap(CBrushShadowMap *pbsm, INDEX iMipmap)
rep stosd
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG clob1, clob2, clob3;
__asm__ __volatile__ (
"cld \n\t"
@@ -1967,7 +1967,7 @@ __forceinline void CLayerMixer::CopyShadowLayer(void)
mov edi,D [ebx].lm_pulShadowMap
rep movsd
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG clob1, clob2, clob3;
__asm__ __volatile__ (
"cld \n\t"
@@ -2006,7 +2006,7 @@ __forceinline void CLayerMixer::FillShadowLayer( COLOR col)
rep stosd
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
ULONG clob1, clob2, clob3;
__asm__ __volatile__ (
"cld \n\t"

View File

@@ -37,7 +37,7 @@ inline ULONG _control87(WORD newcw, WORD mask)
return(fpw);
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
inline ULONG _control87(WORD newcw, WORD mask)
{

View File

@@ -325,7 +325,7 @@ inline SLONG FloatToInt( FLOAT f)
}
return slRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
SLONG slRet;
__asm__ __volatile__ (
"flds (%%eax) \n\t"
@@ -355,7 +355,7 @@ inline FLOAT Log2( FLOAT f) {
}
return fRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
FLOAT fRet;
__asm__ __volatile__ (
"fld1 \n\t"
@@ -402,7 +402,7 @@ inline SLONG FastLog2( SLONG x)
}
return slRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
SLONG slRet;
__asm__ __volatile__ (
"bsrl %%ecx, %%eax \n\t"
@@ -435,7 +435,7 @@ printf("CHECK THIS: %s:%d\n", __FILE__, __LINE__);
}
return slRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
SLONG slRet;
__asm__ __volatile__ (
"bsrl %%ecx, %%eax \n\t"

View File

@@ -42,7 +42,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
#if (defined __MSVC_INLINE__)
#define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
#define ASMOPT 0 // !!! FIXME: rcg10112001 Write GCC inline asm versions...
#else
#define ASMOPT 0

View File

@@ -123,7 +123,7 @@ static inline PIX PIXCoord(FLOAT f) // (f+0.9999f) or (ceil(f))
}
return pixRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
PIX pixRet;
SLONG clobber;
__asm__ __volatile__ (

View File

@@ -43,7 +43,7 @@ static CSoundData *psd;
// nasm on MacOS X is getting wrong addresses of external globals, so I have
// to define them in the .asm file...lame.
-#ifdef __GNU_INLINE__
+#ifdef __GNU_INLINE_X86_32__
#ifdef USE_PORTABLE_C
#define INASM
#else
@@ -94,7 +94,7 @@ void ResetMixer( const SLONG *pslBuffer, const SLONG slBufferSize)
shl ecx,1 // *2 because of 32-bit src format
rep stosd
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// !!! FIXME : rcg12172001 Is this REALLY any faster than memset()?
ULONG clob1, clob2;
__asm__ __volatile__ (
@@ -131,7 +131,7 @@ void CopyMixerBuffer_stereo( const SLONG slSrcOffset, void *pDstBuffer, const SL
shr ecx,2 // bytes to samples per channel
rep movsd
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// !!! FIXME : rcg12172001 Is this REALLY any faster than memcpy()?
ULONG clob1, clob2, clob3;
__asm__ __volatile__ (
@@ -184,7 +184,7 @@ copyLoop:
jnz copyLoop
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"movl %[pvMixerBuffer], %%esi \n\t"
"movl %[pDstBuffer], %%edi \n\t"
@@ -250,7 +250,7 @@ copyLoop:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
__asm__ __volatile__ (
"movl %[pvMixerBuffer], %%esi \n\t"
"movl %[pvMixerBuffer], %%edi \n\t"
@@ -323,7 +323,7 @@ void NormalizeMixerBuffer( const FLOAT fNormStrength, const SLONG slBytes, FLOAT
}
-#ifdef __GNU_INLINE__
+#ifdef __GNU_INLINE_X86_32__
// These are implemented in an external NASM file.
extern "C" {
void MixStereo_asm(CSoundObject *pso);
@@ -548,7 +548,7 @@ loopEnd:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// This is implemented in an external NASM file.
MixMono_asm(pso);
@@ -778,7 +778,7 @@ loopEnd:
emms
}
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
// This is implemented in an external NASM file.
MixStereo_asm(pso);