How can a Vector class be optimized?


Problem Description

Hi everyone

I am working on some code that uses colors. Until recently this code
used colors represented as three floats (RGB format), but it recently changed
so colors are now defined as a spectrum. The size of the vector went from
3 (RGB) to 151 (400 nm to 700 nm, with a sample every 2 nm). The variables
use a simple Vector class defined as follows:

template<typename T, int Depth>
class Vector
{ ...
};

Since the move from the RGB version of the code to the Spectral version
the application has slowed down significantly. I did a test where I use
the Vector class and, for comparison, a straight usage of arrays of 151 floats on
which the same operations are performed 1 million times.

int maxIter = static_cast<int>( 1e+6 );

#include <time.h>

clock_t c1, c0 = clock();

c0 = clock();
for ( int i = 0; i < maxIter; ++i ) {
float real = 1.245;
float anotherReal = 20.43492342;
float v[ 151 ];
float v2[ 151 ];
memset( v, 0, sizeof( float ) * 151 );
memset( v2, 0, sizeof( float ) * 151 );

// mixing
for ( int j = 0; j < 151; ++j ) {
v[ j ] = v2[ j ] * ( 1.0 - 0.5 ) + v[ j ] * 0.5;
}

// summing up & *
for ( int j = 0; j < 151; ++j ) {
v[ j ] = v[ j ] * real;
}

// summing up & *
for ( int j = 0; j < 151; ++j ) {
v[ j ] = v[ j ] * anotherReal;
}

// summing up & *
for ( int j = 0; j < 151; ++j ) {
v[ j ] += v[ j ];
}
}
c1 = clock();

cerr << "\nfloat[ 151 ]" << endl;
cerr << "end CPU time : " << (long)c1 << endl;
cerr << "elapsed CPU time : " << (float)( c1 - c0 ) / CLOCKS_PER_SEC
<< endl;

c0 = clock();
for ( int i = 0; i < maxIter; ++i ) {
float real = 1.245;
float anotherReal = 20.43492342;
Vector<float, 151> v( 12.0 );
Vector<float, 151> v2( -12.0 );
v = v2 * ( 1.0 - 0.5 ) + v * 0.5;
v += Vector<float, 151>( 10.0 ) * real * anotherReal;
}

c1 = clock();

cerr << "\nSuperVector class" << endl;
cerr << "end CPU time : " << (long)c1 << endl;
cerr << "elapsed CPU time : " << (float)( c1 - c0 ) / CLOCKS_PER_SEC
<< endl;

Here are the results
// RGB version, Vector<float, 3>
end CPU time : 390000
elapsed CPU time : 0.39

// Spectral Version Vector<float, 151>
end CPU time : 10510000
elapsed CPU time : 10.12

// Using arrays of 151 floats
end CPU time : 13230000
elapsed CPU time : 2.72

Basically it shows, of course, that using the Vector class really
slows down the application, especially as the size of the Vector
increases, and that it is not as efficient as doing the operations on arrays of
floats directly. So basically my question is: is there a way of
optimising it?

I do realise that doing:
Vector<float, 151> result = Vector<float, 151>( 0.1 ) * 0.1 * 100.0;

is not the same as doing:
float result[ 151 ], temp [ 151 ];
for ( int i = 0; i < 151; ++i ) {
temp[ i ] = 0.1f;
result[ i ] = temp[ i ] * 0.1 * 100.0;
}

But isn't there a way I can make the Vector class as efficient as the
second option (which is to do the math operations on arrays of float
directly)? Or, if speed is a priority, is writing some C-style
code the only way I can get it back when the vector size becomes an
issue?

Thanks for your help -

#include <cstring>   // for memset

template<typename T, int Size>
class SuperVector
{
public:
    T w[ Size ];
public:
    SuperVector()
    { memset( w, 0, sizeof( T ) * Size ); }
    SuperVector( const T &real )
    {
        for ( int i = 0; i < Size; ++i ) {
            (*this).w[ i ] = real;
        }
    }

    // element-wise product
    inline SuperVector<T, Size> operator * ( const SuperVector<T, Size> &v ) const
    {
        SuperVector<T, Size> sv;
        for ( int i = 0; i < Size; ++i ) {
            sv.w[ i ] = (*this).w[ i ] * v.w[ i ];
        }
        return sv;
    }

    // scalar product: returns the scaled copy, leaves *this unchanged
    inline SuperVector<T, Size> operator * ( const T &real ) const
    {
        SuperVector<T, Size> sv;
        for ( int i = 0; i < Size; ++i ) {
            sv.w[ i ] = (*this).w[ i ] * real;
        }
        return sv;
    }

    // element-wise sum
    inline SuperVector<T, Size> operator + ( const SuperVector<T, Size> &v ) const
    {
        SuperVector<T, Size> sv;
        for ( int i = 0; i < Size; ++i ) {
            sv.w[ i ] = (*this).w[ i ] + v.w[ i ];
        }
        return sv;
    }

    // in-place element-wise sum
    inline SuperVector<T, Size>& operator += ( const SuperVector<T, Size> &v )
    {
        for ( int i = 0; i < Size; ++i ) {
            (*this).w[ i ] += v.w[ i ];
        }
        return *this;
    }
};

Recommended Answer

ma*****@yahoo.com wrote:

[question quoted above; code snipped]

Read up on expression templates. Or, better, use a linear algebra library.
Best

Kai-Uwe Bux
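
For illustration, here is a minimal expression-template sketch along those lines. It is not from the original thread, and the names (VecExpr, Vec, VecSum, VecScale) are invented for the example. The point is that operator+ and operator* return tiny expression objects instead of full vectors, so the 151 elements are only evaluated once, inside the assignment loop, and no 151-element temporaries are materialised:

#include <cstddef>
#include <iostream>

// CRTP base: lets free operators accept any expression type.
template<typename Derived>
struct VecExpr {
    const Derived& self() const { return static_cast<const Derived&>(*this); }
};

// Concrete storage. Assigning from an expression runs a single loop.
template<int Size>
class Vec : public VecExpr< Vec<Size> > {
    float w[Size];
public:
    explicit Vec(float x = 0.0f) { for (int i = 0; i < Size; ++i) w[i] = x; }
    float  operator[](std::size_t i) const { return w[i]; }
    float& operator[](std::size_t i)       { return w[i]; }

    template<typename E>
    Vec& operator=(const VecExpr<E>& e) {
        for (int i = 0; i < Size; ++i) w[i] = e.self()[i];   // one pass, no temporaries
        return *this;
    }
    template<typename E>
    Vec& operator+=(const VecExpr<E>& e) {
        for (int i = 0; i < Size; ++i) w[i] += e.self()[i];
        return *this;
    }
};

// Expression node: element-wise sum of two sub-expressions.
template<typename L, typename R>
struct VecSum : VecExpr< VecSum<L, R> > {
    const L& l; const R& r;
    VecSum(const L& l_, const R& r_) : l(l_), r(r_) {}
    float operator[](std::size_t i) const { return l[i] + r[i]; }
};

// Expression node: sub-expression scaled by a scalar.
template<typename L>
struct VecScale : VecExpr< VecScale<L> > {
    const L& l; float s;
    VecScale(const L& l_, float s_) : l(l_), s(s_) {}
    float operator[](std::size_t i) const { return l[i] * s; }
};

template<typename L, typename R>
VecSum<L, R> operator+(const VecExpr<L>& l, const VecExpr<R>& r)
{ return VecSum<L, R>(l.self(), r.self()); }

template<typename L>
VecScale<L> operator*(const VecExpr<L>& l, float s)
{ return VecScale<L>(l.self(), s); }

int main() {
    Vec<151> v(12.0f), v2(-12.0f);
    // The mixing step from the question, evaluated in a single pass:
    v = v2 * (1.0f - 0.5f) + v * 0.5f;
    std::cout << v[0] << '\n';   // prints 0
}

The expression objects only hold references, so an expression should be consumed within the statement that builds it, as above; that is the usual caveat of this technique. A ready-made linear algebra library (the other suggestion) gives the same effect without the boilerplate.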



ma*****@yahoo.com wrote:

[question quoted above, up to:]

> Vector<float, 151> v( 12.0 );
> Vector<float, 151> v2( -12.0 );

std::vector<float> v(151, 12.0);
std::vector<float> v2(151, -12.0);

using the exact same random-access iterator calculations as the array above;
see the clock results below.





_____________________________
Results:

float[ 151 ]
end CPU time : 2620000
elapsed CPU time : 2.62
std::vector class
end CPU time : 4680000
elapsed CPU time : 2.06


> So basically my question is : is there a way of optimising it ?




yes, use resize() to manually specify the container's size.

void resize(n, t = T())
- Inserts or erases elements at the end such that the size becomes n
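
To make that concrete: the idea is to give the std::vector its size once, outside the hot loop, and then reuse the same storage instead of constructing 151-element vectors on every iteration. A small sketch (editorial, with assumed variable names, not the poster's actual test code):

#include <algorithm>
#include <vector>

int main() {
    const int maxIter = static_cast<int>( 1e+6 );

    std::vector<float> v, v2;
    v.resize( 151 );          // size set once; no per-iteration allocation
    v2.resize( 151 );

    for ( int i = 0; i < maxIter; ++i ) {
        std::fill( v.begin(),  v.end(),  12.0f );    // reuse the same buffers
        std::fill( v2.begin(), v2.end(), -12.0f );

        for ( int j = 0; j < 151; ++j ) {
            v[ j ] = v2[ j ] * ( 1.0f - 0.5f ) + v[ j ] * 0.5f;
        }
    }
    return 0;
}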




" ma ***** @ yahoo.com" < ma ***** @ yahoo.comwrote:
"ma*****@yahoo.com" <ma*****@yahoo.comwrote:

v = v2 * ( 1.0 - 0.5 ) + v * 0.5;
v += Vector<float, 151>( 10.0 ) * real * anotherReal;




Optimize your vector class by removing the op* and op+. Too many
temporaries are being created.

Here is an interesting exercise:

// headers needed to make the snippet compile
#include <algorithm>
#include <ctime>
#include <iostream>
#include <vector>
using namespace std;

int maxIter = static_cast<int>( 1e+6 );

clock_t c1, c0 = clock();

struct binary_op
{
float operator()( float lhs, float rhs ) const
{
return lhs * ( 1.0 - 0.5 ) + rhs * 0.5;
}
};

struct unary_op
{
unary_op( float r, float r2 ): real( r ), anotherReal( r2 ) { }
float operator()( float v ) const {
return v + 10.0 * real * anotherReal;
}
const float real, anotherReal;
};

int main() {
float real = 1.245;
float anotherReal = 20.43492342;
vector<float> v( 151, 12.0 );
vector<float> v2( 151, -12.0 );
c0 = clock();
for ( int i = 0; i < maxIter; ++i ) {
// mixing
for ( int j = 0; j < 151; ++j ) {
v[ j ] = v2[ j ] * ( 1.0 - 0.5 ) + v[ j ] * 0.5;
}

// summing up & *
for ( int j = 0; j < 151; ++j ) {
v[ j ] = v[ j ] * real;
}

// summing up & *
for ( int j = 0; j < 151; ++j ) {
v[ j ] = v[ j ] * anotherReal;
}

// summing up & *
for ( int j = 0; j < 151; ++j ) {
v[ j ] += v[ j ];
}
}
c1 = clock();

cerr << "\nManual iteration" << endl;
cerr << "end CPU time : " << (long)c1 << endl;
cerr << "elapsed CPU time : " << (float)( c1 - c0 ) / CLOCKS_PER_SEC
<< endl;

for ( int i = 0; i < 151; ++i ) {
v[i] = 12.0;
v2[i] = -12.0;
}

c0 = clock();
for ( int i = 0; i < maxIter; ++i ) {
transform( v2.begin(), v2.end(), v.begin(), v.begin(),
binary_op() );
transform( v.begin(), v.end(), v.begin(),
unary_op( real, anotherReal ) );
}
c1 = clock();
cerr << "\nAlgorithm Use" << endl;
cerr << "end CPU time : " << (long)c1 << endl;
cerr << "elapsed CPU time : " << (float)( c1 - c0 ) / CLOCKS_PER_SEC
<< endl;
}

My output:

Manual iteration
end CPU time : 174
elapsed CPU time : 1.74

Algorithm Use
end CPU time : 265
elapsed CPU time : 0.91

--
To send me email, put "sheltie" in the subject.
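
To make the "too many temporaries" remark above concrete, here is an editorial sketch (not from the thread) of a cut-down vector class that offers only in-place operations; the names SpecVec, operator*= and addScaled are invented for the example, and the two statements from the question are rewritten with them:

#include <cstring>

// Assumed, cut-down variant of the question's SuperVector with only
// in-place operations, so no operator call creates a fresh vector.
template<typename T, int Size>
class SpecVec
{
public:
    T w[ Size ];
    SpecVec() { memset( w, 0, sizeof( T ) * Size ); }
    SpecVec( const T &x ) { for ( int i = 0; i < Size; ++i ) w[ i ] = x; }

    SpecVec& operator *= ( const T &s )                  // scale in place
    { for ( int i = 0; i < Size; ++i ) w[ i ] *= s; return *this; }

    SpecVec& operator += ( const SpecVec &v )            // add in place
    { for ( int i = 0; i < Size; ++i ) w[ i ] += v.w[ i ]; return *this; }

    // w[i] += v.w[i] * s, a fused "add a scaled vector" step (BLAS-style axpy)
    SpecVec& addScaled( const SpecVec &v, const T &s )
    { for ( int i = 0; i < Size; ++i ) w[ i ] += v.w[ i ] * s; return *this; }
};

int main()
{
    float real = 1.245f;
    float anotherReal = 20.43492342f;
    SpecVec<float, 151> v( 12.0f ), v2( -12.0f );

    // v = v2 * ( 1.0 - 0.5 ) + v * 0.5;   rewritten without temporaries:
    v *= 0.5f;
    v.addScaled( v2, 0.5f );

    // v += Vector<float, 151>( 10.0 ) * real * anotherReal;
    // every element of the right-hand side is the same constant, so:
    SpecVec<float, 151> k( 10.0f * real * anotherReal );
    v += k;
    return 0;
}

Whether this beats the expression-template route shown earlier depends on the compiler and the expressions involved, but both avoid materialising a fresh 151-element vector for every operator application.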

