rename __GNU_INLINE__ to __GNU_INLINE_X86_32__

There is not only x86 in this world.
Simple sed job.
notaz 2016-04-24 17:38:22 +03:00
parent 056d77f479
commit 4b5cc03bf0
18 changed files with 67 additions and 67 deletions
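
The "simple sed job" itself isn't recorded in the commit; a one-liner along these lines would reproduce the bulk of it (the Sources/ path is an assumption, and the sites renamed to __GNU_INLINE_X86__ in the platform header below were a separate edit):

    # hypothetical reconstruction of the sed job; the path is an assumption
    grep -rl '__GNU_INLINE__' Sources/ \
      | xargs sed -i 's/__GNU_INLINE__/__GNU_INLINE_X86_32__/g'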

View File

@@ -47,7 +47,7 @@ static inline __int64 ReadTSC_profile(void)
 }
 return mmRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __int64 mmRet;
 __asm__ __volatile__ (
 "rdtsc \n\t"

View File

@@ -64,7 +64,7 @@ static inline __int64 ReadTSC(void)
 }
 return mmRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __int64 mmRet;
 __asm__ __volatile__ (
 "rdtsc \n\t"

View File

@@ -134,21 +134,21 @@ MY_STATIC_ASSERT(size_tSize, sizeof(size_t) == sizeof(void*));
 #endif
 #endif
-#if ((defined __GNUC__) && (!defined __GNU_INLINE__))
-#define __GNU_INLINE__
+#if ((defined __GNUC__) && (!defined __GNU_INLINE_X86__))
+#define __GNU_INLINE_X86__
 #endif
 #if (defined __INTEL_COMPILER)
-#if ((!defined __GNU_INLINE__) && (!defined __MSVC_INLINE__))
-#error Please define __GNU_INLINE__ or __MSVC_INLINE__ with Intel C++.
+#if ((!defined __GNU_INLINE_X86__) && (!defined __MSVC_INLINE__))
+#error Please define __GNU_INLINE_X86__ or __MSVC_INLINE__ with Intel C++.
 #endif
-#if ((defined __GNU_INLINE__) && (defined __MSVC_INLINE__))
-#error Define either __GNU_INLINE__ or __MSVC_INLINE__ with Intel C++.
+#if ((defined __GNU_INLINE_X86__) && (defined __MSVC_INLINE__))
+#error Define either __GNU_INLINE_X86__ or __MSVC_INLINE__ with Intel C++.
 #endif
 #endif
-#if defined(__GNU_INLINE__) && defined(__i386__)
+#if defined(__GNU_INLINE_X86__) && defined(__i386__)
 #define FPU_REGS "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)"
 #define MMX_REGS "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7"
 #endif
@@ -233,7 +233,7 @@ MY_STATIC_ASSERT(size_tSize, sizeof(size_t) == sizeof(void*));
 #if (defined USE_PORTABLE_C)
 // DG: according to http://blog.regehr.org/archives/1063 this is fast
 return (ul<<bits) | (ul>>(-bits&31));
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // This, on the other hand, is wicked fast. :)
 __asm__ __volatile__ (
 "roll %%cl, %%eax \n\t"

View File

@@ -148,7 +148,7 @@ static void DetectCPU(void)
 mov dword ptr [ulFeatures], edx
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // test MMX presence and update flag
 __asm__ __volatile__ (
 "pushl %%ebx \n\t"

View File

@@ -347,7 +347,7 @@ COLOR MulColors( COLOR col1, COLOR col2)
 }
 return colRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 COLOR colRet;
 __asm__ __volatile__ (
 "pushl %%ebx \n\t"
@@ -535,7 +535,7 @@ COLOR AddColors( COLOR col1, COLOR col2)
 mov D [colRet],ebx
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp;
 __asm__ __volatile__ (
 // if xbx is "r", gcc runs out of regs in -fPIC + -fno-omit-fp :(
@@ -679,7 +679,7 @@ colSkip2:
 colSkip1:
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 STUBBED("convert to inline asm.");
 #else

View File

@@ -225,7 +225,7 @@ __forceinline ULONG ByteSwap( ULONG ul)
 }
 return ulRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "bswapl %%eax \n\t"
 : "=a" (ul)
@@ -252,7 +252,7 @@ __forceinline ULONG rgba2argb( ULONG ul)
 }
 return ulRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG ulRet;
 __asm__ __volatile__ (
 "rorl $8, %%eax \n\t"
@@ -287,7 +287,7 @@ __forceinline ULONG abgr2argb( COLOR col)
 }
 return ulRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG ulRet;
 __asm__ __volatile__ (
 "bswapl %%eax \n\t"
@@ -323,7 +323,7 @@ inline void CopyLongs( ULONG *pulSrc, ULONG *pulDst, INDEX ctLongs)
 rep movsd
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // I haven't benchmarked it, but in many cases, memcpy() becomes an
 // inline (asm?) macro on GNU platforms, so this might not be a
 // speed gain at all over the USE_PORTABLE_C version.
@@ -359,7 +359,7 @@ inline void StoreLongs( ULONG ulVal, ULONG *pulDst, INDEX ctLongs)
 rep stosd
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "cld \n\t"
 "rep \n\t"

View File

@@ -42,7 +42,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 #define ASMOPT 0
 #elif (defined __MSVC_INLINE__)
 #define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 #define ASMOPT 1
 #else
 #define ASMOPT 0
@@ -184,7 +184,7 @@ elemRest:
 mov D [edi],eax
 elemDone:
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "movl %[ctElems], %%ecx \n\t"
 "movl %[piDst], %%edi \n\t"
@@ -505,7 +505,7 @@ static void RSBinToGroups( ScenePolygon *pspoFirst)
 mov D [_ctGroupsCount],eax
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "movl $2, %%eax \n\t"
 "bsrl (%%esi), %%ecx \n\t"
@@ -860,7 +860,7 @@ static void RSSetTextureCoords( ScenePolygon *pspoGroup, INDEX iLayer, INDEX iUn
 // !!! FIXME: rcg11232001 This inline conversion is broken. Use the
 // !!! FIXME: rcg11232001 C version for now with GCC.
-#if ((ASMOPT == 1) && (!defined __GNU_INLINE__) && (!defined __INTEL_COMPILER))
+#if ((ASMOPT == 1) && (!defined __GNU_INLINE_X86_32__) && (!defined __INTEL_COMPILER))
 #if (defined __MSVC_INLINE__)
 __asm {
 mov esi,D [pspo]
@@ -915,7 +915,7 @@ vtxLoop:
 /*
 // !!! FIXME: rcg11232001 This inline conversion is broken. Use the
 // !!! FIXME: rcg11232001 C version for now on Linux.
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 STUBBED("debug this");
 __asm__ __volatile__ (
 "0: \n\t" // vtxLoop

View File

@@ -95,7 +95,7 @@ pixLoop:
 jnz pixLoop
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "movl %[pubTexture], %%esi \n\t"
 "movl %[pixTextureSize], %%ecx \n\t"

View File

@@ -216,7 +216,7 @@ void UploadTexture_OGL( ULONG *pulTexture, PIX pixSizeU, PIX pixSizeV,
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "pxor %%mm0,%%mm0 \n\t"
 "movl %[pulSrc],%%esi \n\t"

View File

@@ -297,7 +297,7 @@ pixLoopN:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "pxor %%mm0, %%mm0 \n\t"
 "movl %[pulSrcMipmap], %%esi \n\t"
@@ -428,7 +428,7 @@ halfEnd:
 fullEnd:
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp, tmp2;
 __asm__ __volatile__ (
 "xorl %[xbx], %[xbx] \n\t"
@@ -852,7 +852,7 @@ nextRowO:
 emms;
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp;
 __asm__ __volatile__ (
 "movl %[pulSrc], %%esi \n\t"
@@ -1046,7 +1046,7 @@ allDoneE:
 emms;
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "pxor %%mm0, %%mm0 \n\t"
 "movl %[pulDst], %%esi \n\t"
@@ -2204,7 +2204,7 @@ lowerLoop:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 FB_pulSrc = pulSrc;
 FB_pulDst = pulDst;

View File

@@ -102,7 +102,7 @@ inline void glCOLOR( COLOR col)
 mov dword ptr [col],eax
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "bswapl %%eax \n\t"
 : "=a" (col)

View File

@@ -36,7 +36,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 #define ASMOPT 0
 #elif (defined __MSVC_INLINE__)
 #define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 #define ASMOPT 1
 #else
 #define ASMOPT 0
@@ -1357,7 +1357,7 @@ pixLoop:
 pop ebx
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // rcg12152001 needed extra registers. :(
 _slHeightMapStep_renderWater = slHeightMapStep;
 _pixBaseWidth_renderWater = pixBaseWidth;
@@ -1626,7 +1626,7 @@ pixLoop2:
 pop ebx
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "bsfl %[pixBaseWidth], %%eax \n\t"
 "movl $32, %%edx \n\t"
@@ -2146,7 +2146,7 @@ pixLoop4:
 pop ebx
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "bsfl %[pixBaseWidth], %%eax \n\t"
 "movl $32, %%edx \n\t"
@@ -2976,7 +2976,7 @@ pixDone:
 pop ebx
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "movl %[slColumnModulo], %%edx \n\t"
 "movl %[slBufferMask], %%ecx \n\t"
@@ -3119,7 +3119,7 @@ pixLoopF:
 jnz rowLoopF
 pop ebx
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 _pubHeat_RenderPlasmaFire = pubHeat; // ran out of registers. :/
 __asm__ __volatile__ (
 "movl %[slHeatRowStep], %%eax \n\t"

View File

@@ -44,7 +44,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 #define ASMOPT 0
 #elif (defined __MSVC_INLINE__)
 #define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 #define ASMOPT 1
 #else
 #define ASMOPT 0
@@ -364,7 +364,7 @@ skipPixel:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp1, tmp2;
 __asm__ __volatile__ (
 // prepare interpolants
@@ -576,7 +576,7 @@ skipPixel:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp1, tmp2;
 __asm__ __volatile__ (
 // prepare interpolants
@@ -796,7 +796,7 @@ skipPixel:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp1, tmp2;
 __asm__ __volatile__ (
 // prepare interpolants
@@ -1008,7 +1008,7 @@ skipPixel:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp1, tmp2;
 __asm__ __volatile__ (
 // prepare interpolants
@@ -1201,7 +1201,7 @@ BOOL CLayerMixer::PrepareOneLayerPoint( CBrushShadowLayer *pbsl, BOOL bNoMask)
 FLOAT fDL2oDV = fDDL2oDV + 2*(lm_vStepV%v00);
 //_v00 = v00;
-#if ((ASMOPT == 1) && (!defined __GNU_INLINE__))
+#if ((ASMOPT == 1) && (!defined __GNU_INLINE_X86_32__))
 #if (defined __MSVC_INLINE__)
 __asm {
 fld D [fDDL2oDU]
@@ -1230,7 +1230,7 @@ BOOL CLayerMixer::PrepareOneLayerPoint( CBrushShadowLayer *pbsl, BOOL bNoMask)
 fistp D [_slDDL2oDV]
 fistp D [_slDDL2oDU]
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 STUBBED("inline asm.");
 #else
 #error Please write inline assembly for your platform.
@@ -1321,7 +1321,7 @@ void CLayerMixer::AddOneLayerGradient( CGradientParameters &gp)
 _pulLayer = lm_pulShadowMap;
 FLOAT fStart = Clamp( fGr00-(fDGroDJ+fDGroDI)*0.5f, 0.0f, 1.0f);
-#if ((ASMOPT == 1) && (!defined __GNU_INLINE__))
+#if ((ASMOPT == 1) && (!defined __GNU_INLINE_X86_32__))
 #if (defined __MSVC_INLINE__)
 __int64 mmRowAdv;
 SLONG fixGRow = (fGr00-(fDGroDJ+fDGroDI)*0.5f)*32767.0f; // 16:15
@@ -1436,7 +1436,7 @@ rowNext:
 rowDone:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 STUBBED("WRITE ME. Argh.");
@@ -1565,7 +1565,7 @@ rowNext:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp;
 __asm__ __volatile__ (
 // prepare pointers and variables
@@ -1665,7 +1665,7 @@ skipLight:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG tmp;
 __asm__ __volatile__ (
 // prepare pointers and variables
@@ -1862,7 +1862,7 @@ void CLayerMixer::MixOneMipmap(CBrushShadowMap *pbsm, INDEX iMipmap)
 rep stosd
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG clob1, clob2, clob3;
 __asm__ __volatile__ (
 "cld \n\t"
@@ -1967,7 +1967,7 @@ __forceinline void CLayerMixer::CopyShadowLayer(void)
 mov edi,D [ebx].lm_pulShadowMap
 rep movsd
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG clob1, clob2, clob3;
 __asm__ __volatile__ (
 "cld \n\t"
@@ -2006,7 +2006,7 @@ __forceinline void CLayerMixer::FillShadowLayer( COLOR col)
 rep stosd
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 ULONG clob1, clob2, clob3;
 __asm__ __volatile__ (
 "cld \n\t"

View File

@@ -37,7 +37,7 @@ inline ULONG _control87(WORD newcw, WORD mask)
 return(fpw);
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 inline ULONG _control87(WORD newcw, WORD mask)
 {
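
The GNU-inline _control87 gated here is a read-modify-write of the x87 control word, whose usual shape is an fnstcw/fldcw pair. A sketch under that assumption (plain unsigned types standing in for the engine's WORD/ULONG; not necessarily the exact body):

    static inline unsigned long control87_sketch(unsigned short newcw,
                                                 unsigned short mask)
    {
        unsigned short fpw;
        __asm__ __volatile__ ("fnstcw %0" : "=m" (fpw));     /* read CW */
        if (mask != 0) {
            fpw = (unsigned short)((fpw & ~mask) | (newcw & mask));
            __asm__ __volatile__ ("fldcw %0" : : "m" (fpw)); /* write CW */
        }
        return fpw;
    }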

View File

@@ -325,7 +325,7 @@ inline SLONG FloatToInt( FLOAT f)
 }
 return slRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 SLONG slRet;
 __asm__ __volatile__ (
 "flds (%%eax) \n\t"
@@ -355,7 +355,7 @@ inline FLOAT Log2( FLOAT f) {
 }
 return fRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 FLOAT fRet;
 __asm__ __volatile__ (
 "fld1 \n\t"
@@ -402,7 +402,7 @@ inline SLONG FastLog2( SLONG x)
 }
 return slRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 SLONG slRet;
 __asm__ __volatile__ (
 "bsrl %%ecx, %%eax \n\t"
@@ -435,7 +435,7 @@ printf("CHECK THIS: %s:%d\n", __FILE__, __LINE__);
 }
 return slRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 SLONG slRet;
 __asm__ __volatile__ (
 "bsrl %%ecx, %%eax \n\t"

View File

@@ -42,7 +42,7 @@ with this program; if not, write to the Free Software Foundation, Inc.,
 #if (defined __MSVC_INLINE__)
 #define ASMOPT 1
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 #define ASMOPT 0 // !!! FIXME: rcg10112001 Write GCC inline asm versions...
 #else
 #define ASMOPT 0

View File

@@ -123,7 +123,7 @@ static inline PIX PIXCoord(FLOAT f) // (f+0.9999f) or (ceil(f))
 }
 return pixRet;
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 PIX pixRet;
 SLONG clobber;
 __asm__ __volatile__ (

View File

@@ -43,7 +43,7 @@ static CSoundData *psd;
 // nasm on MacOS X is getting wrong addresses of external globals, so I have
 // to define them in the .asm file...lame.
-#ifdef __GNU_INLINE__
+#ifdef __GNU_INLINE_X86_32__
 #ifdef USE_PORTABLE_C
 #define INASM
 #else
@@ -94,7 +94,7 @@ void ResetMixer( const SLONG *pslBuffer, const SLONG slBufferSize)
 shl ecx,1 // *2 because of 32-bit src format
 rep stosd
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // !!! FIXME : rcg12172001 Is this REALLY any faster than memset()?
 ULONG clob1, clob2;
 __asm__ __volatile__ (
@@ -131,7 +131,7 @@ void CopyMixerBuffer_stereo( const SLONG slSrcOffset, void *pDstBuffer, const SL
 shr ecx,2 // bytes to samples per channel
 rep movsd
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // !!! FIXME : rcg12172001 Is this REALLY any faster than memcpy()?
 ULONG clob1, clob2, clob3;
 __asm__ __volatile__ (
@@ -184,7 +184,7 @@ copyLoop:
 jnz copyLoop
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "movl %[pvMixerBuffer], %%esi \n\t"
 "movl %[pDstBuffer], %%edi \n\t"
@@ -250,7 +250,7 @@ copyLoop:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 __asm__ __volatile__ (
 "movl %[pvMixerBuffer], %%esi \n\t"
 "movl %[pvMixerBuffer], %%edi \n\t"
@@ -323,7 +323,7 @@ void NormalizeMixerBuffer( const FLOAT fNormStrength, const SLONG slBytes, FLOAT
 }
-#ifdef __GNU_INLINE__
+#ifdef __GNU_INLINE_X86_32__
 // These are implemented in an external NASM file.
 extern "C" {
 void MixStereo_asm(CSoundObject *pso);
@@ -548,7 +548,7 @@ loopEnd:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // This is implemented in an external NASM file.
 MixMono_asm(pso);
@@ -778,7 +778,7 @@ loopEnd:
 emms
 }
-#elif (defined __GNU_INLINE__)
+#elif (defined __GNU_INLINE_X86_32__)
 // This is implemented in an external NASM file.
 MixStereo_asm(pso);