Restored generic (non-SIMD) code

This commit is contained in:
Brian Harris
2013-05-29 13:12:13 -05:00
parent be311f42e1
commit 9c37079c16
23 changed files with 3328 additions and 24 deletions

View File

@@ -44,6 +44,7 @@ const float LCP_DELTA_FORCE_EPSILON = 1e-9f;
#define IGNORE_UNSATISFIABLE_VARIABLES
#if defined( ID_WIN_X86_SSE_ASM ) || defined( ID_WIN_X86_SSE_INTRIN )
ALIGN16( const __m128 SIMD_SP_zero ) = { 0.0f, 0.0f, 0.0f, 0.0f };
ALIGN16( const __m128 SIMD_SP_one ) = { 1.0f, 1.0f, 1.0f, 1.0f };
@@ -67,6 +68,8 @@ ALIGN16( const unsigned int SIMD_DW_four[4] ) = { 4, 4, 4, 4 };
ALIGN16( const unsigned int SIMD_DW_index[4] ) = { 0, 1, 2, 3 };
ALIGN16( const int SIMD_DW_not3[4] ) = { ~3, ~3, ~3, ~3 };
#endif
/*
========================
Multiply_SIMD
@@ -82,6 +85,7 @@ static void Multiply_SIMD( float * dst, const float * src0, const float * src1,
dst[i] = src0[i] * src1[i];
}
#ifdef ID_WIN_X86_SSE_INTRIN
for ( ; i + 4 <= count; i += 4 ) {
assert_16_byte_aligned( &dst[i] );
@@ -94,6 +98,20 @@ static void Multiply_SIMD( float * dst, const float * src0, const float * src1,
_mm_store_ps( dst + i, s0 );
}
#else
for ( ; i + 4 <= count; i += 4 ) {
assert_16_byte_aligned( &dst[i] );
assert_16_byte_aligned( &src0[i] );
assert_16_byte_aligned( &src1[i] );
dst[i+0] = src0[i+0] * src1[i+0];
dst[i+1] = src0[i+1] * src1[i+1];
dst[i+2] = src0[i+2] * src1[i+2];
dst[i+3] = src0[i+3] * src1[i+3];
}
#endif
for ( ; i < count; i++ ) {
dst[i] = src0[i] * src1[i];
@@ -115,6 +133,7 @@ static void MultiplyAdd_SIMD( float * dst, const float constant, const float * s
dst[i] += constant * src[i];
}
#ifdef ID_WIN_X86_SSE_INTRIN
__m128 c = _mm_load1_ps( & constant );
for ( ; i + 4 <= count; i += 4 ) {
@@ -127,6 +146,19 @@ static void MultiplyAdd_SIMD( float * dst, const float constant, const float * s
_mm_store_ps( dst + i, s );
}
#else
for ( ; i + 4 <= count; i += 4 ) {
assert_16_byte_aligned( &src[i] );
assert_16_byte_aligned( &dst[i] );
dst[i+0] += constant * src[i+0];
dst[i+1] += constant * src[i+1];
dst[i+2] += constant * src[i+2];
dst[i+3] += constant * src[i+3];
}
#endif
for ( ; i < count; i++ ) {
dst[i] += constant * src[i];
@@ -144,7 +176,7 @@ static float DotProduct_SIMD( const float * src0, const float * src1, const int
assert_16_byte_aligned( src0 );
assert_16_byte_aligned( src1 );
#ifndef _lint
#ifdef ID_WIN_X86_SSE_INTRIN
__m128 sum = (__m128 &) SIMD_SP_zero;
int i = 0;
@@ -266,7 +298,7 @@ static void LowerTriangularSolve_SIMD( const idMatX & L, float * x, const float
int i = skip;
#ifndef _lint
#ifdef ID_WIN_X86_SSE_INTRIN
// work up to a multiple of 4 rows
for ( ; ( i & 3 ) != 0 && i < n; i++ ) {
@@ -520,7 +552,7 @@ static void LowerTriangularSolveTranspose_SIMD( const idMatX & L, float * x, con
const float * lptr = L.ToFloatPtr() + m * nc + m - 4;
float * xptr = x + m;
#ifndef _lint
#ifdef ID_WIN_X86_SSE2_INTRIN
// process 4 rows at a time
for ( int i = m; i >= 4; i -= 4 ) {
@@ -850,7 +882,7 @@ static bool LDLT_Factor_SIMD( idMatX & mat, idVecX & invDiag, const int n ) {
mptr[j*nc+3] = ( mptr[j*nc+3] - v[0] * mptr[j*nc+0] - v[1] * mptr[j*nc+1] - v[2] * mptr[j*nc+2] ) * d;
}
#ifndef _lint
#ifdef ID_WIN_X86_SSE2_INTRIN
__m128 vzero = _mm_setzero_ps();
for ( int i = 4; i < n; i += 4 ) {
@@ -1210,6 +1242,7 @@ static void GetMaxStep_SIMD( const float * f, const float * a, const float * del
const float * lo, const float * hi, const int * side, int numUnbounded, int numClamped,
int d, float dir, float & maxStep, int & limit, int & limitSide ) {
#ifdef ID_WIN_X86_SSE2_INTRIN
__m128 vMaxStep;
__m128i vLimit;
@@ -1332,6 +1365,65 @@ static void GetMaxStep_SIMD( const float * f, const float * a, const float * del
_mm_store_ss( & maxStep, vMaxStep );
limit = _mm_cvtsi128_si32( vLimit );
limitSide = _mm_cvtsi128_si32( vLimitSide );
#else
// default to a full step for the current variable
{
float negAccel = -a[d];
float deltaAccel = delta_a[d];
int m0 = ( fabs( deltaAccel ) > LCP_DELTA_ACCEL_EPSILON );
float step = negAccel / ( m0 ? deltaAccel : 1.0f );
maxStep = m0 ? step : 0.0f;
limit = d;
limitSide = 0;
}
// test the current variable
{
float deltaForce = dir;
float forceLimit = ( deltaForce < 0.0f ) ? lo[d] : hi[d];
float step = ( forceLimit - f[d] ) / deltaForce;
int setSide = ( deltaForce < 0.0f ) ? -1 : 1;
int m0 = ( fabs( deltaForce ) > LCP_DELTA_FORCE_EPSILON );
int m1 = ( fabs( forceLimit ) != idMath::INFINITY );
int m2 = ( step < maxStep );
int m3 = ( m0 & m1 & m2 );
maxStep = m3 ? step : maxStep;
limit = m3 ? d : limit;
limitSide = m3 ? setSide : limitSide;
}
// test the clamped bounded variables
for ( int i = numUnbounded; i < numClamped; i++ ) {
float deltaForce = delta_f[i];
float forceLimit = ( deltaForce < 0.0f ) ? lo[i] : hi[i];
int m0 = ( fabs( deltaForce ) > LCP_DELTA_FORCE_EPSILON );
float step = ( forceLimit - f[i] ) / ( m0 ? deltaForce : 1.0f );
int setSide = ( deltaForce < 0.0f ) ? -1 : 1;
int m1 = ( fabs( forceLimit ) != idMath::INFINITY );
int m2 = ( step < maxStep );
int m3 = ( m0 & m1 & m2 );
maxStep = m3 ? step : maxStep;
limit = m3 ? i : limit;
limitSide = m3 ? setSide : limitSide;
}
// test the not clamped bounded variables
for ( int i = numClamped; i < d; i++ ) {
float negAccel = -a[i];
float deltaAccel = delta_a[i];
int m0 = ( side[i] * deltaAccel > LCP_DELTA_ACCEL_EPSILON );
float step = negAccel / ( m0 ? deltaAccel : 1.0f );
int m1 = ( lo[i] < -LCP_BOUND_EPSILON || hi[i] > LCP_BOUND_EPSILON );
int m2 = ( step < maxStep );
int m3 = ( m0 & m1 & m2 );
maxStep = m3 ? step : maxStep;
limit = m3 ? i : limit;
limitSide = m3 ? 0 : limitSide;
}
#endif
}
/*

View File

@@ -171,6 +171,7 @@ void idMatX::CopyLowerToUpperTriangle() {
assert( ( GetNumColumns() & 3 ) == 0 );
assert( GetNumColumns() >= GetNumRows() );
#ifdef ID_WIN_X86_SSE_INTRIN
const int n = GetNumColumns();
const int m = GetNumRows();
@@ -307,6 +308,20 @@ void idMatX::CopyLowerToUpperTriangle() {
_mm_store_ps( basePtr + n0, r0 );
}
#else
const int n = GetNumColumns();
const int m = GetNumRows();
for ( int i = 0; i < m; i++ ) {
const float * __restrict ptr = ToFloatPtr() + ( i + 1 ) * n + i;
float * __restrict dstPtr = ToFloatPtr() + i * n;
for ( int j = i + 1; j < m; j++ ) {
dstPtr[j] = ptr[0];
ptr += n;
}
}
#endif
#ifdef _DEBUG
for ( int i = 0; i < numRows; i++ ) {

View File

@@ -389,7 +389,7 @@ idMatX::operator=
ID_INLINE idMatX &idMatX::operator=( const idMatX &a ) {
SetSize( a.numRows, a.numColumns );
int s = a.numRows * a.numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( mat + i, _mm_load_ps( a.mat + i ) );
}
@@ -410,7 +410,7 @@ ID_INLINE idMatX idMatX::operator*( const float a ) const {
m.SetTempSize( numRows, numColumns );
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
__m128 va = _mm_load1_ps( & a );
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( m.mat + i, _mm_mul_ps( _mm_load_ps( mat + i ), va ) );
@@ -462,7 +462,7 @@ ID_INLINE idMatX idMatX::operator+( const idMatX &a ) const {
assert( numRows == a.numRows && numColumns == a.numColumns );
m.SetTempSize( numRows, numColumns );
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( m.mat + i, _mm_add_ps( _mm_load_ps( mat + i ), _mm_load_ps( a.mat + i ) ) );
}
@@ -485,7 +485,7 @@ ID_INLINE idMatX idMatX::operator-( const idMatX &a ) const {
assert( numRows == a.numRows && numColumns == a.numColumns );
m.SetTempSize( numRows, numColumns );
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( m.mat + i, _mm_sub_ps( _mm_load_ps( mat + i ), _mm_load_ps( a.mat + i ) ) );
}
@@ -504,7 +504,7 @@ idMatX::operator*=
*/
ID_INLINE idMatX &idMatX::operator*=( const float a ) {
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
__m128 va = _mm_load1_ps( & a );
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( mat + i, _mm_mul_ps( _mm_load_ps( mat + i ), va ) );
@@ -537,7 +537,7 @@ idMatX::operator+=
ID_INLINE idMatX &idMatX::operator+=( const idMatX &a ) {
assert( numRows == a.numRows && numColumns == a.numColumns );
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( mat + i, _mm_add_ps( _mm_load_ps( mat + i ), _mm_load_ps( a.mat + i ) ) );
}
@@ -558,7 +558,7 @@ idMatX::operator-=
ID_INLINE idMatX &idMatX::operator-=( const idMatX &a ) {
assert( numRows == a.numRows && numColumns == a.numColumns );
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( mat + i, _mm_sub_ps( _mm_load_ps( mat + i ), _mm_load_ps( a.mat + i ) ) );
}
@@ -744,7 +744,7 @@ idMatX::Zero
*/
ID_INLINE void idMatX::Zero() {
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( mat + i, _mm_setzero_ps() );
}
@@ -838,7 +838,7 @@ idMatX::Negate
*/
ID_INLINE void idMatX::Negate() {
int s = numRows * numColumns;
#ifdef MATX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(MATX_SIMD)
ALIGN16( const unsigned int signBit[4] ) = { IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK };
for ( int i = 0; i < s; i += 4 ) {
_mm_store_ps( mat + i, _mm_xor_ps( _mm_load_ps( mat + i ), (__m128 &) signBit[0] ) );

View File

@@ -51,6 +51,7 @@ const float idMath::INFINITY = 1e30f;
const float idMath::FLT_EPSILON = 1.192092896e-07f;
const float idMath::FLT_SMALLEST_NON_DENORMAL = * reinterpret_cast< const float * >( & SMALLEST_NON_DENORMAL ); // 1.1754944e-038f
#if defined( ID_WIN_X86_SSE_INTRIN )
const __m128 idMath::SIMD_SP_zero = { 0.0f, 0.0f, 0.0f, 0.0f };
const __m128 idMath::SIMD_SP_255 = { 255.0f, 255.0f, 255.0f, 255.0f };
const __m128 idMath::SIMD_SP_min_char = { -128.0f, -128.0f, -128.0f, -128.0f };
@@ -61,6 +62,7 @@ const __m128 idMath::SIMD_SP_smallestNonDenorm = { FLT_SMALLEST_NON_DENORMAL, FL
const __m128 idMath::SIMD_SP_tiny = { 1e-4f, 1e-4f, 1e-4f, 1e-4f };
const __m128 idMath::SIMD_SP_rsqrt_c0 = { 3.0f, 3.0f, 3.0f, 3.0f };
const __m128 idMath::SIMD_SP_rsqrt_c1 = { -0.5f, -0.5f, -0.5f, -0.5f };
#endif
bool idMath::initialized = false;
dword idMath::iSqrt[SQRT_TABLE_SIZE]; // inverse square root lookup table

View File

@@ -419,6 +419,7 @@ public:
static const float FLT_EPSILON; // smallest positive number such that 1.0+FLT_EPSILON != 1.0
static const float FLT_SMALLEST_NON_DENORMAL; // smallest non-denormal 32-bit floating point value
#if defined( ID_WIN_X86_SSE_INTRIN )
static const __m128 SIMD_SP_zero;
static const __m128 SIMD_SP_255;
static const __m128 SIMD_SP_min_char;
@@ -429,6 +430,7 @@ public:
static const __m128 SIMD_SP_tiny;
static const __m128 SIMD_SP_rsqrt_c0;
static const __m128 SIMD_SP_rsqrt_c1;
#endif
private:
enum {
@@ -460,9 +462,15 @@ idMath::InvSqrt
========================
*/
ID_INLINE float idMath::InvSqrt( float x ) {
	// Returns 1 / sqrt( x ). Inputs at or below the smallest non-denormal
	// float return INFINITY, which also guards the division against zero.
	// NOTE: the former ID_WIN_X86_SSE_INTRIN branch was byte-identical to the
	// generic branch, so the redundant #ifdef/#else was collapsed.
	return ( x > FLT_SMALLEST_NON_DENORMAL ) ? sqrtf( 1.0f / x ) : INFINITY;
}
/*
@@ -471,9 +479,15 @@ idMath::InvSqrt16
========================
*/
ID_INLINE float idMath::InvSqrt16( float x ) {
	// Lower-precision variant of InvSqrt (same contract: 1 / sqrt( x ),
	// INFINITY for x at or below the smallest non-denormal float).
	// NOTE: the former ID_WIN_X86_SSE_INTRIN branch was byte-identical to the
	// generic branch, so the redundant #ifdef/#else was collapsed.
	return ( x > FLT_SMALLEST_NON_DENORMAL ) ? sqrtf( 1.0f / x ) : INFINITY;
}
/*
@@ -482,7 +496,11 @@ idMath::Sqrt
========================
*/
ID_INLINE float idMath::Sqrt( float x ) {
	// Returns sqrt( x ); negative inputs return 0.0f.
#ifdef ID_WIN_X86_SSE_INTRIN
	// BUGFIX: was ( x >= 0.0f ), which at x == 0 computed
	// 0 * InvSqrt( 0 ) = 0 * INFINITY = NaN. The strict comparison
	// returns 0.0f for x == 0 instead.
	return ( x > 0.0f ) ? x * InvSqrt( x ) : 0.0f;
#else
	// sqrtf( 0 ) is already 0.0f, so the strict comparison is behavior-neutral here.
	return ( x > 0.0f ) ? sqrtf( x ) : 0.0f;
#endif
}
/*
@@ -491,7 +509,11 @@ idMath::Sqrt16
========================
*/
ID_INLINE float idMath::Sqrt16( float x ) {
	// Lower-precision variant of Sqrt; negative inputs return 0.0f.
#ifdef ID_WIN_X86_SSE_INTRIN
	// BUGFIX: was ( x >= 0.0f ), which at x == 0 computed
	// 0 * InvSqrt16( 0 ) = 0 * INFINITY = NaN. The strict comparison
	// returns 0.0f for x == 0 instead.
	return ( x > 0.0f ) ? x * InvSqrt16( x ) : 0.0f;
#else
	// sqrtf( 0 ) is already 0.0f, so the strict comparison is behavior-neutral here.
	return ( x > 0.0f ) ? sqrtf( x ) : 0.0f;
#endif
}
/*
@@ -601,6 +623,7 @@ idMath::SinCos
========================
*/
ID_INLINE void idMath::SinCos( float a, float &s, float &c ) {
#if defined( ID_WIN_X86_ASM )
_asm {
fld a
fsincos
@@ -609,6 +632,10 @@ ID_INLINE void idMath::SinCos( float a, float &s, float &c ) {
fstp dword ptr [ecx]
fstp dword ptr [edx]
}
#else
s = sinf( a );
c = cosf( a );
#endif
}
/*
@@ -1128,11 +1155,24 @@ idMath::Ftoi
========================
*/
ID_INLINE int idMath::Ftoi( float f ) {
	// Convert a float to a signed 32-bit integer, truncating toward zero.
#ifdef ID_WIN_X86_SSE_INTRIN
	// If a converted result is larger than the maximum signed doubleword integer,
	// the floating-point invalid exception is raised, and if this exception is masked,
	// the indefinite integer value (80000000H) is returned.
	__m128 x = _mm_load_ss( &f );
	return _mm_cvttss_si32( x );
#elif 0 // round chop (C/C++ standard)
	// Branchless bit-level truncation, kept for reference but disabled by "#elif 0".
	int i, s, e, m, shift;
	i = *reinterpret_cast<int *>(&f);
	s = i >> IEEE_FLT_SIGN_BIT;		// arithmetic shift: all ones if negative, else zero
	e = ( ( i >> IEEE_FLT_MANTISSA_BITS ) & ( ( 1 << IEEE_FLT_EXPONENT_BITS ) - 1 ) ) - IEEE_FLT_EXPONENT_BIAS;	// unbiased exponent
	m = ( i & ( ( 1 << IEEE_FLT_MANTISSA_BITS ) - 1 ) ) | ( 1 << IEEE_FLT_MANTISSA_BITS );	// mantissa with implicit leading one restored
	shift = e - IEEE_FLT_MANTISSA_BITS;	// how far the mantissa must move to align the binary point
	return ( ( ( ( m >> -shift ) | ( m << shift ) ) & ~( e >> INT32_SIGN_BIT ) ) ^ s ) - s;	// shift, zero results < 1.0, then apply the sign
#else
	// If a converted result is larger than the maximum signed doubleword integer the result is undefined.
	return C_FLOAT_TO_INT( f );
#endif
}
/*
@@ -1141,10 +1181,21 @@ idMath::Ftoi8
========================
*/
ID_INLINE char idMath::Ftoi8( float f ) {
	// Convert a float to a signed byte, clamping the result to [-128, 127].
#ifdef ID_WIN_X86_SSE_INTRIN
	// Clamp in SSE registers, then truncate toward zero.
	__m128 v = _mm_load_ss( &f );
	v = _mm_min_ss( _mm_max_ss( v, SIMD_SP_min_char ), SIMD_SP_max_char );
	return static_cast<char>( _mm_cvttss_si32( v ) );
#else
	// Truncate first, then clamp the integer result to the signed byte range.
	const int result = C_FLOAT_TO_INT( f );
	return static_cast<char>( result < -128 ? -128 : ( result > 127 ? 127 : result ) );
#endif
}
/*
@@ -1153,10 +1204,21 @@ idMath::Ftoi16
========================
*/
ID_INLINE short idMath::Ftoi16( float f ) {
	// Convert a float to a signed short, clamping the result to [-32768, 32767].
#ifdef ID_WIN_X86_SSE_INTRIN
	// Clamp in SSE registers, then truncate toward zero.
	__m128 v = _mm_load_ss( &f );
	v = _mm_min_ss( _mm_max_ss( v, SIMD_SP_min_short ), SIMD_SP_max_short );
	return static_cast<short>( _mm_cvttss_si32( v ) );
#else
	// Truncate first, then clamp the integer result to the signed short range.
	const int result = C_FLOAT_TO_INT( f );
	return static_cast<short>( result < -32768 ? -32768 : ( result > 32767 ? 32767 : result ) );
#endif
}
/*
@@ -1183,12 +1245,23 @@ idMath::Ftob
========================
*/
ID_INLINE byte idMath::Ftob( float f ) {
	// Convert a float to an unsigned byte, clamping the result to [0, 255].
#ifdef ID_WIN_X86_SSE_INTRIN
	// Negative inputs clamp to 0 and inputs above 255 clamp to 255,
	// then the value is truncated toward zero.
	__m128 v = _mm_load_ss( &f );
	v = _mm_min_ss( _mm_max_ss( v, SIMD_SP_zero ), SIMD_SP_255 );
	return static_cast<byte>( _mm_cvttss_si32( v ) );
#else
	// Truncate first, then clamp the integer result to the byte range.
	const int result = C_FLOAT_TO_INT( f );
	return static_cast<byte>( result < 0 ? 0 : ( result > 255 ? 255 : result ) );
#endif
}
/*

View File

@@ -213,7 +213,7 @@ ID_INLINE idVecX idVecX::operator-() const {
idVecX m;
m.SetTempSize( size );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
ALIGN16( unsigned int signBit[4] ) = { IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK };
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( m.p + i, _mm_xor_ps( _mm_load_ps( p + i ), (__m128 &) signBit[0] ) );
@@ -233,7 +233,7 @@ idVecX::operator=
*/
ID_INLINE idVecX &idVecX::operator=( const idVecX &a ) {
SetSize( a.size );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
for ( int i = 0; i < a.size; i += 4 ) {
_mm_store_ps( p + i, _mm_load_ps( a.p + i ) );
}
@@ -254,7 +254,7 @@ ID_INLINE idVecX idVecX::operator+( const idVecX &a ) const {
assert( size == a.size );
m.SetTempSize( size );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( m.p + i, _mm_add_ps( _mm_load_ps( p + i ), _mm_load_ps( a.p + i ) ) );
}
@@ -276,7 +276,7 @@ ID_INLINE idVecX idVecX::operator-( const idVecX &a ) const {
assert( size == a.size );
m.SetTempSize( size );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( m.p + i, _mm_sub_ps( _mm_load_ps( p + i ), _mm_load_ps( a.p + i ) ) );
}
@@ -295,7 +295,7 @@ idVecX::operator+=
*/
ID_INLINE idVecX &idVecX::operator+=( const idVecX &a ) {
assert( size == a.size );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( p + i, _mm_add_ps( _mm_load_ps( p + i ), _mm_load_ps( a.p + i ) ) );
}
@@ -315,7 +315,7 @@ idVecX::operator-=
*/
ID_INLINE idVecX &idVecX::operator-=( const idVecX &a ) {
assert( size == a.size );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( p + i, _mm_sub_ps( _mm_load_ps( p + i ), _mm_load_ps( a.p + i ) ) );
}
@@ -337,7 +337,7 @@ ID_INLINE idVecX idVecX::operator*( const float a ) const {
idVecX m;
m.SetTempSize( size );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
__m128 va = _mm_load1_ps( & a );
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( m.p + i, _mm_mul_ps( _mm_load_ps( p + i ), va ) );
@@ -356,7 +356,7 @@ idVecX::operator*=
========================
*/
ID_INLINE idVecX &idVecX::operator*=( const float a ) {
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
__m128 va = _mm_load1_ps( & a );
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( p + i, _mm_mul_ps( _mm_load_ps( p + i ), va ) );
@@ -551,7 +551,7 @@ idVecX::Zero
========================
*/
ID_INLINE void idVecX::Zero() {
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( p + i, _mm_setzero_ps() );
}
@@ -567,7 +567,7 @@ idVecX::Zero
*/
ID_INLINE void idVecX::Zero( int length ) {
SetSize( length );
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
for ( int i = 0; i < length; i += 4 ) {
_mm_store_ps( p + i, _mm_setzero_ps() );
}
@@ -611,7 +611,7 @@ idVecX::Negate
========================
*/
ID_INLINE void idVecX::Negate() {
#ifdef VECX_SIMD
#if defined(ID_WIN_X86_SSE_INTRIN) && defined(VECX_SIMD)
ALIGN16( const unsigned int signBit[4] ) = { IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK, IEEE_FLT_SIGN_MASK };
for ( int i = 0; i < size; i += 4 ) {
_mm_store_ps( p + i, _mm_xor_ps( _mm_load_ps( p + i ), (__m128 &) signBit[0] ) );

View File

@@ -451,6 +451,10 @@ ID_INLINE idVec3 operator*( const float a, const idVec3 b ) {
return idVec3( b.x * a, b.y * a, b.z * a );
}
ID_INLINE idVec3 operator/( const float a, const idVec3 b ) {
	// Divide the scalar by each component of the vector.
	idVec3 quotient( a / b.x, a / b.y, a / b.z );
	return quotient;
}
ID_INLINE idVec3 idVec3::operator+( const idVec3 &a ) const {
	// Component-wise vector addition.
	idVec3 sum( x + a.x, y + a.y, z + a.z );
	return sum;
}