防止内存碎片 [英] Preventing memory fragmentation

查看:70
本文介绍了防止内存碎片的处理方法,对大家解决问题具有一定的参考价值,需要的朋友们下面随着小编来一起学习吧!

问题描述

给出以下有关C ++中内存管理的信息:


-----

c-runtime动态内存管理器(以及大多数其他商业内容)内存

管理员)存在类似于硬盘驱动器文件的碎片问题

系统。

随着时间的推移，越频繁地调用 new/delete 或 alloc/free，

堆中就会出现越多的空洞和碎片。这可能会导致内存使用效率低下，以及缓存命中效率低下。


性能关键型应用程序的目标是最小化



调用new / delete的数量。许多人使用缓冲区管理器分配

连续内存块,并在消耗



时将它们与列表指针链接在一起。另一种方法是使用备用列表,其中每个

块的内存实际上并没有释放,而是返回给池管理器。

-----


我已经实现了一个内存池,它将合并内存块

,当它们被释放时返回到池中。我感兴趣

这个实现的有效性超过默认内存

管理器相比的有效性，以及在特定应用程序中使用该池时我可能需要改变或考虑的内容。

我很感激这个小组提供的任何建设性

反馈。


实现和测试池的代码如下:


#include< stdlib.h>

#include< time.h>

#include< string.h>


#include< new>

#include< list>


class MemoryPool

{

public:

显式MemoryPool(size_t memorySize);

~MemoryPool();


void * Reserve(size_t size);

void Release(void * pMemory);


private:

typedef unsigned char字节;

class Block

{

public:

static Block * FromMemory(void * pMemory);


显式块(size_t size,Block * pPrior = NULL,Block *

pNext = NULL);


size_t GetSize()const;

void * GetMemory();

Block * Prior();

Block * Next();


Block * Spl itOff(size_t size);

Block * FuseWith(Block * pBlock);


private:

const Block * End( )const;


size_t m_size;

Block * m_pPrior;

Block * m_pNext;

};


Block * m_pFirstBlock;

void * m_pMemoryPool;

};


MemoryPool :: MemoryPool



size_t memorySize



{

m_pMemoryPool = :: operator new(memorySize);

:: memset(m_pMemoryPool,0,memorySize);


size_t blockSize = memorySize - sizeof(Block );

m_pFirstBlock = new(m_pMemoryPool)Block(blockSize);

}


MemoryPool :: ~MemoryPool()

{

:: operator delete(m_pMemoryPool);

}


void * MemoryPool ::预订



size_t size



{

Block* pBlock = m_pFirstBlock;

while(pBlock->GetSize() < size){

pBlock = pBlock->Next();

if(NULL == pBlock){

throw std::bad_alloc();

}

}


Block * pFreeBlock = pBlock-> SplitOff(size);


if(NULL == pFreeBlock-> Prior()){

m_pFirstBlock = pFreeBlock;

}

return pBlock->GetMemory();

}


void MemoryPool :: Release



void * pMemory



{

Block * pBlock = Block :: FromMemory(pMemory);


Block * pNextBlock = m_pFirstBlock;

Block * pPriorBlock = NULL;


while(pBlock> pNextBlock){

pPriorBlock = pNextBlock;

pNextBlock = pNextBlock-> Next();

if(NULL == pNextBlock){

break;

}

}


if(NULL!= pNextBlock){

pBlock = pNextBlock-> FuseWith(pBlock);

}


if(NULL!= pPriorBlock){

pPriorBlock->FuseWith(pBlock);

}

else {

m_pFirstBlock = pBlock;

}
}


MemoryPool :: Block * MemoryPool :: Block :: FromMemory



void * pMemory



{

Byte * pBytes = reinterpret_cast< Byte *>(pMemory);

return reinterpret_cast< Block *>(pBytes - sizeof(Block));

}


MemoryPool :: Block :: Block



size_t尺寸,

Block * pPrior,

Block * pNext

):m_size(size ),

m_pPrior(pPrior),

m_pNext(pNext)

{

}


size_t MemoryPool :: Block :: GetSize()const

{

返回m_size;

}


void * MemoryPool :: Block :: GetMemory()

{

Byte * pMemory = reinterpret_cast< Byte *>(this) ;

pMemory + = sizeof(* this);

返回pMemory;

}


MemoryPool::Block* MemoryPool::Block::Prior()

{

返回m_pPrior;

}


MemoryPool :: Block * MemoryPool :: Block :: Next()

{

返回m_pNext;

}


MemoryPool :: Block * MemoryPool :: Block :: SplitOff



size_t size



{

Block * pFreeBlock = NULL;

size_t totalSize = sizeof(* this)+ size;

if(( totalSize> = m_size)){

pFreeBlock = m_pNext;

pFreeBlock-> m_pPrior = m_pPrior;

}

else {

Byte * pNextBlock = reinterpret_cast< Byte *>(this);

pNextBlock + = totalSize;


pFreeBlock = new(pNextBlock)Block(m_size - totalSize,

m_pPrior,

m_pNext);


if(NULL != m_pNext){

m_pNext-> m_pPrior = pFreeBlock;

}

}


if(NULL!= m_pPrior){

m_pPrior-> m_pNext = pFreeBlock;

}


m_size = size;

m_pPrior = NULL;

m_pNext = NULL;


返回pFreeBlock;

}


MemoryPool :: Block * MemoryPool :: Block :: FuseWith



Block * pBlock



{

if(pBlock<这个){

if(pBlock-> End()== this){

pBlock-> m_size + =(sizeof(* this)+ m_size);

pBlock-> m_pNext = m_pNext;


if(NULL!= m_pNext){

m_pNext-> m_pPrior = pBlock;

}

}

else {

pBlock-> m_pNext = this;

}

}


else {

if(this-> End()== pBlock){

m_size + =(sizeof(* this)+ pBlock-> GetSize());

m_pNext = pBlock-> m_pNext;

返回此;

}

else {

pBlock-> m_pPrior = this;

m_pNext = pBlock ;

}

}


返回pBlock;

}

const MemoryPool :: Block * MemoryPool :: Block :: End()const

{

const Byte * pEnd = reinterpret_cast< const Byte *> (this);

pEnd + =(sizeof(* this)+ m_size);

返回reinterpret_cast< const Block *>(pEnd);

}


等级缓冲区

{

私有:

typedef unsigned char字节;


static const size_t POOL_SIZE = 4096;

静态MemoryPool m_memoryPool;

字节* m_pData;


public:

static void * operator new(size_t size);

static void operator delete(void * pMemory);


显式缓冲区(size_t size);

~Buffer();

};


MemoryPool Buffer :: m_memoryPool(Buffer :: POOL_SIZE);


void * Buffer :: operator new



size_t size



{

返回m_memoryPool.Reserve(size);

}


void Buffer :: operator delete



void * pMemory



{

m_memoryPool.Release(pMemory);

}


缓冲区::缓冲区



size_t size



{

m_pData = reinterpret_cast< Byte *>(m_memoryPool.Reserve(size));
:: memset(m_pData,0xFF,size);

}


缓冲区::〜缓冲区()

{

m_memoryPool.Release(m_pData);

}


typedef std :: list< Buffer *> ; BufferList;

typedef BufferList :: iterator BufferIterator;


void AddBuffer(BufferList * pList,size_t size = 0);

void RemoveBuffer(BufferList * pList,size_t buffer = 0);

void ClearBufferList(BufferList * pList);


void AddBuffer



BufferList * pList,

size_t size



{

const size_t MAX_BUFFER_SIZE = 128;

const size_t MIN_BUFFER_SIZE = 16;


if(0 == size){

size = :: rand()%MAX_BUFFER_SIZE + 1;

if(MIN_BUFFER_SIZE> size){

size = MIN_BUFFER_SIZE;

}

}


pList-> push_back(新缓冲区(大小));

}


void RemoveBuffer



BufferList * pList,

size_t buffer



{

size_t size = pList-> size();


if(0 == buffer ||(buffer> = size)) {

buffer = :: rand()%size; < br $>
}


BufferIterator itBuffer = pList-> begin();

std :: advance(itBuffer,buffer);


缓冲区* pBuffer = * itBuffer;

pList-> erase(itBuffer);

删除pBuffer;

}


void ClearBufferList



BufferList * pList



{

BufferIterator itBuffer = pList-> begin();

const BufferIterator itEND = pList-> end();

while(它END!= itBuffer){

delete * itBuffer;

++ itBuffer;

}

}


int main()

{

:: srand(:: time(NULL));

BufferList bufferList;


const int nBUFFER_COUNT = 20;

for(int nBuffer; nBUFFER_COUNT> nBuffer; ++ nBuffer){

:: AddBuffer(& bufferList);

}


for(int nRemove = 0; 5> nRemove; ++ nRemove){

:: RemoveBuffer(& bufferList);

}


for( int nTest = 0; 10> nTest; ++ nTest){

if(0 == :: rand()%2){

:: AddBuffer(& ; bufferList);

}

else {

:: RemoveBuffer(& bufferList);

} < br $>
}


:: ClearBufferList(& bufferList);

}

Given the following information about memory management in C++:

-----
The c-runtime dynamic memory manager (and most other commercial memory
managers) has issues with fragmentation similar to a hard drive file
system.
Over time, the more often use call new/delete or alloc/free, there
will be
gaps and fragments in the heap. This can lead to inefficient use of
available memory, as well as cache-hit inefficiencies.

The goal of performance-critical applications is to minimize the
number of
calls to new/delete. Many use buffer managers that allocate blocks of
contiguous memory, and chain them together with list pointers when the
block
is consumed. Another approach is to use a Spare List, in which each
block of
memory is not actually freed but returned to a pool manager.
-----

I have implemented a memory pool that will coalesce blocks of memory
that are returned to the pool when they are freed up. I am interested
in the effectiveness of this implementation over the default memory
manager, and what I might want to change or consider when using the
pool in a specific application. I would appreciate any constructive
feedback this group has to offer.

The code to implement and test the pool is as follows:

#include <stdlib.h>
#include <time.h>
#include <string.h>

#include <new>
#include <list>

// A fixed-size memory pool. One contiguous region is obtained up front
// and then handed out piecewise; freed pieces are coalesced with their
// physical neighbors to fight fragmentation.
class MemoryPool
{
public:
explicit MemoryPool(size_t memorySize);
~MemoryPool();

// Allocate `size` bytes from the pool (throws std::bad_alloc when no
// free block is large enough).
void* Reserve(size_t size);
// Return a piece previously obtained from Reserve() to the pool.
void Release(void* pMemory);

private:
typedef unsigned char Byte;
// Header placed directly in front of every piece of pool memory.
// Free blocks form a doubly linked list kept sorted by address.
class Block
{
public:
// Recover the header from the payload pointer GetMemory() returned.
static Block* FromMemory(void* pMemory);

explicit Block(size_t size, Block* pPrior = NULL, Block*
pNext = NULL);

size_t GetSize() const;
void* GetMemory();
Block* Prior();
Block* Next();

// Carve `size` bytes off the front of this free block.
Block* SplitOff(size_t size);
// Merge with pBlock when physically adjacent, else just link it in.
Block* FuseWith( Block* pBlock);

private:
// One-past-the-end address of this block (header + payload).
const Block* End() const;

size_t m_size;   // usable payload bytes (header excluded)
Block* m_pPrior; // previous free block, by address
Block* m_pNext;  // next free block, by address
};

Block* m_pFirstBlock; // head of the address-ordered free list
void* m_pMemoryPool;  // raw backing allocation
};

// Creates a pool backed by a single raw allocation of memorySize bytes.
// A Block header is carved out of the front of the region, so
// memorySize must exceed sizeof(Block) to leave any usable space.
// Throws std::bad_alloc when the allocation fails or the size is too small.
MemoryPool::MemoryPool
(
size_t memorySize
)
{
// Guard against size_t underflow in the blockSize computation below:
// a pool no larger than one Block header has zero usable space.
if(memorySize <= sizeof(Block)){
throw std::bad_alloc();
}

m_pMemoryPool = ::operator new(memorySize);
::memset(m_pMemoryPool, 0, memorySize);

// The initial free list is a single block spanning everything after
// its own header.
size_t blockSize = memorySize - sizeof(Block);
m_pFirstBlock = new(m_pMemoryPool) Block(blockSize);
}

// Releases the backing storage. Any memory still handed out by Reserve()
// becomes invalid; Block is trivially destructible, so no per-block
// teardown is required before freeing the raw region.
MemoryPool::~MemoryPool()
{
::operator delete(m_pMemoryPool);
}

// Allocates `size` bytes from the pool using a first-fit scan of the
// address-ordered free list. Throws std::bad_alloc when no free block
// is large enough. The request is rounded up so that the Block header
// written after this allocation (and the payload handed to the caller)
// stays pointer-aligned -- the original code placed headers at
// arbitrary offsets, which is undefined behavior on strict-alignment
// targets.
void* MemoryPool::Reserve
(
size_t size
)
{
const size_t alignment = sizeof(void*); // power of two
size = (size + alignment - 1) & ~(alignment - 1);

Block* pBlock = m_pFirstBlock;
// The free list can be empty after the pool was fully consumed; the
// original code dereferenced the NULL head here.
if(NULL == pBlock){
throw std::bad_alloc();
}
while(pBlock->GetSize() < size){
pBlock = pBlock->Next();
if(NULL == pBlock){
throw std::bad_alloc();
}
}

// Carve the request out of the chosen block; pFreeBlock is whatever
// remains on the free list after the split.
Block* pFreeBlock = pBlock->SplitOff(size);

// SplitOff may return NULL when the last free block was consumed
// whole; the original code called Prior() on it unconditionally.
if(NULL == pFreeBlock || NULL == pFreeBlock->Prior()){
m_pFirstBlock = pFreeBlock;
}

return pBlock->GetMemory();
}

// Returns a piece of memory obtained from Reserve() to the pool,
// coalescing it with its physical neighbors where possible.
void MemoryPool::Release
(
void* pMemory
)
{
// Tolerate NULL like operator delete does; FromMemory(NULL) would
// otherwise compute a bogus header address.
if(NULL == pMemory){
return;
}

Block* pBlock = Block::FromMemory(pMemory);

// When the pool was fully consumed the free list is empty; the loop
// below would dereference the NULL head. The returned block (whose
// links SplitOff already zeroed) simply becomes the whole list.
if(NULL == m_pFirstBlock){
m_pFirstBlock = pBlock;
return;
}

// Walk the address-ordered list to find the first free block located
// after pBlock (pPriorBlock trails one step behind).
Block* pNextBlock = m_pFirstBlock;
Block* pPriorBlock = NULL;

while(pBlock > pNextBlock){
pPriorBlock = pNextBlock;
pNextBlock = pNextBlock->Next();
if(NULL == pNextBlock){
break;
}
}

// Try to fuse with the following free block...
if(NULL != pNextBlock){
pBlock = pNextBlock->FuseWith(pBlock);
}

// ...then with the preceding one; with no predecessor the released
// block becomes the new head of the free list.
if(NULL != pPriorBlock){
pPriorBlock->FuseWith(pBlock);
}
else{
m_pFirstBlock = pBlock;
}
}

// Maps a payload pointer (as handed out by GetMemory) back to the Block
// header that sits immediately before it.
MemoryPool::Block* MemoryPool::Block::FromMemory
(
void* pMemory
)
{
Byte* pHeader = static_cast<Byte*>(pMemory) - sizeof(Block);
return reinterpret_cast<Block*>(pHeader);
}

// Constructs a free-list node describing `size` usable payload bytes,
// optionally linked between pPrior and pNext.
MemoryPool::Block::Block
(
size_t size,
Block* pPrior,
Block* pNext
)
    : m_size(size)
    , m_pPrior(pPrior)
    , m_pNext(pNext)
{
    // All state is established in the initializer list.
}

// Number of usable payload bytes in this block (header excluded).
size_t MemoryPool::Block::GetSize() const { return m_size; }

// Address of this block's payload: the byte immediately after the header.
void* MemoryPool::Block::GetMemory()
{
return reinterpret_cast<Byte*>(this) + sizeof(*this);
}

// Previous free block in address order, or NULL at the list head.
MemoryPool::Block* MemoryPool::Block::Prior() { return m_pPrior; }

// Next free block in address order, or NULL at the list tail.
MemoryPool::Block* MemoryPool::Block::Next() { return m_pNext; }

// Carves `size` payload bytes off the front of this free block and
// unlinks this block from the free list. Returns the block that now
// occupies this block's position in the list: either a newly created
// remainder block placed right after the reserved bytes, or m_pNext
// (possibly NULL) when nothing is left over.
MemoryPool::Block* MemoryPool::Block::SplitOff
(
size_t size
)
{
Block* pFreeBlock = NULL;
size_t totalSize = sizeof(*this) + size;

if(totalSize >= m_size){
// No room for another Block header: hand the whole block out.
// m_pNext is legitimately NULL when this was the last free block;
// the original code dereferenced it unconditionally and crashed.
pFreeBlock = m_pNext;
if(NULL != pFreeBlock){
pFreeBlock->m_pPrior = m_pPrior;
}
// m_size deliberately keeps the block's full payload: shrinking it
// to `size` (as the original did) permanently lost the tail bytes
// and broke End()-adjacency, preventing future coalescing.
}
else{
// Enough room to split: place a new header right after the
// reserved bytes and give it the remaining payload.
Byte* pNextBlock = reinterpret_cast<Byte*>(this);
pNextBlock += totalSize;

pFreeBlock = new(pNextBlock) Block(m_size - totalSize,
m_pPrior,
m_pNext);

if(NULL != m_pNext){
m_pNext->m_pPrior = pFreeBlock;
}

// Only in the split case does this block shrink to the request.
m_size = size;
}

if(NULL != m_pPrior){
m_pPrior->m_pNext = pFreeBlock;
}

// This block is now allocated: detach it from the free list.
m_pPrior = NULL;
m_pNext = NULL;

return pFreeBlock;
}

// Merges pBlock into this block's neighborhood. When the two blocks are
// physically adjacent their storage is coalesced into one block;
// otherwise pBlock is simply linked next to this block. Returns the
// block that now represents pBlock's memory (the lower of the fused pair).
MemoryPool::Block* MemoryPool::Block::FuseWith
(
Block* pBlock
)
{
if(pBlock < this){
if(pBlock->End() == this){
// pBlock ends exactly where we begin: absorb us into it.
pBlock->m_size += (sizeof(*this) + m_size);
pBlock->m_pNext = m_pNext;

if(NULL != m_pNext){
m_pNext->m_pPrior = pBlock;
}
}
else{
// Not adjacent: link pBlock in front of us. The original code
// never set our back-pointer, leaving the doubly linked list
// inconsistent after a release into the middle of the pool.
pBlock->m_pNext = this;
m_pPrior = pBlock;
}
}

else{
if(this->End() == pBlock){
// We end exactly where pBlock begins: absorb it into us.
m_size += (sizeof(*this) + pBlock->GetSize());
m_pNext = pBlock->m_pNext;
// Repair the successor's back-pointer, which the original code
// left pointing at the now-absorbed pBlock.
if(NULL != m_pNext){
m_pNext->m_pPrior = this;
}
return this;
}
else{
// Not adjacent: link pBlock after us.
pBlock->m_pPrior = this;
m_pNext = pBlock;
}
}

return pBlock;
}

// One-past-the-end address of this block (header plus payload): the spot
// where a physically adjacent block's header would begin.
const MemoryPool::Block* MemoryPool::Block::End() const
{
const Byte* pEnd =
    reinterpret_cast<const Byte*>(this) + sizeof(*this) + m_size;
return reinterpret_cast<const Block*>(pEnd);
}

// Test payload type. Both the Buffer object itself (via the class-level
// operator new/delete) and its m_pData payload are reserved from one
// shared MemoryPool, so each `new Buffer(n)` performs two reservations.
class Buffer
{
private:
typedef unsigned char Byte;

// NOTE(review): 4096 bytes is easily exhausted by the test driver
// (20 buffers of up to 128 bytes each, plus two Block headers and a
// Buffer object per allocation) -- Reserve() then throws bad_alloc.
static const size_t POOL_SIZE = 4096;
static MemoryPool m_memoryPool; // shared by all Buffer instances
Byte* m_pData;                  // payload reserved from the pool

public:
static void* operator new(size_t size);
static void operator delete(void* pMemory);

explicit Buffer(size_t size);
~Buffer();
};

MemoryPool Buffer::m_memoryPool(Buffer::POOL_SIZE);

// Allocates the Buffer object itself from the shared pool instead of the
// global heap. Propagates std::bad_alloc when the pool is exhausted.
void* Buffer::operator new
(
size_t size
)
{
return m_memoryPool.Reserve(size);
}

// Returns the Buffer object's own storage to the shared pool.
void Buffer::operator delete
(
void* pMemory
)
{
// operator delete must accept NULL as a no-op; Release() would
// otherwise compute a bogus Block header address from it.
if(NULL != pMemory){
m_memoryPool.Release(pMemory);
}
}

// Reserves a `size`-byte payload from the shared pool (in addition to
// the object itself, which operator new also drew from the pool) and
// stamps it with a recognizable 0xFF fill pattern.
Buffer::Buffer
(
size_t size
)
{
void* pRaw = m_memoryPool.Reserve(size);
m_pData = reinterpret_cast<Byte*>(pRaw);
::memset(m_pData, 0xFF, size);
}

// Returns the payload to the pool; the object's own storage is returned
// separately by Buffer::operator delete.
Buffer::~Buffer()
{
m_memoryPool.Release(m_pData);
}

// The test driver keeps raw owning Buffer pointers in a std::list.
typedef std::list<Buffer*> BufferList;
typedef BufferList::iterator BufferIterator;

// A size/buffer argument of 0 selects a random size / list position.
void AddBuffer(BufferList* pList, size_t size = 0);
void RemoveBuffer(BufferList* pList, size_t buffer = 0);
void ClearBufferList(BufferList* pList);

// Appends one newly allocated Buffer to the list. A size of 0 (the
// default) picks a random size, clamped into
// [MIN_BUFFER_SIZE, MAX_BUFFER_SIZE].
void AddBuffer
(
BufferList* pList,
size_t size
)
{
const size_t MAX_BUFFER_SIZE = 128;
const size_t MIN_BUFFER_SIZE = 16;

if(0 == size){
const size_t randomSize = ::rand() % MAX_BUFFER_SIZE + 1;
size = (randomSize < MIN_BUFFER_SIZE) ? MIN_BUFFER_SIZE : randomSize;
}

pList->push_back(new Buffer(size));
}

// Deletes one Buffer from the list. A buffer index of 0 (the default) or
// an out-of-range index selects a random position. A quirk preserved
// from the original: index 0 can never be requested explicitly, since 0
// is the "pick at random" sentinel.
void RemoveBuffer
(
BufferList* pList,
size_t buffer
)
{
size_t size = pList->size();

// The original computed ::rand() % 0 (undefined behavior) and then
// erased from an empty list; removing from an empty list is a no-op.
if(0 == size){
return;
}

if(0 == buffer || (buffer >= size)){
buffer = ::rand() % size;
}

BufferIterator itBuffer = pList->begin();
std::advance(itBuffer, buffer);

Buffer* pBuffer = *itBuffer;
pList->erase(itBuffer);
delete pBuffer;
}

// Deletes every Buffer owned by the list and empties it.
void ClearBufferList
(
BufferList* pList
)
{
for(BufferIterator itBuffer = pList->begin();
    pList->end() != itBuffer;
    ++itBuffer){
delete *itBuffer;
}

// The original left the now-dangling pointers in the container; empty
// it so the list no longer refers to freed memory.
pList->clear();
}

// Test driver: fills the pool, then churns it with a random mix of
// removals and additions to exercise block splitting and coalescing.
// NOTE(review): Buffer's 4 KiB pool can be exhausted by this workload,
// in which case Reserve() throws an uncaught std::bad_alloc.
int main()
{
::srand(static_cast<unsigned int>(::time(NULL)));
BufferList bufferList;

const int nBUFFER_COUNT = 20;
// The original declared `int nBuffer;` without an initializer --
// reading it in the loop condition was undefined behavior.
for(int nBuffer = 0; nBUFFER_COUNT > nBuffer; ++nBuffer){
::AddBuffer(&bufferList);
}

for(int nRemove = 0; 5 > nRemove; ++nRemove){
::RemoveBuffer(&bufferList);
}

for(int nTest = 0; 10 > nTest; ++nTest){
if(0 == ::rand() % 2){
::AddBuffer(&bufferList);
}
else{
::RemoveBuffer(&bufferList);
}
}

::ClearBufferList(&bufferList);
}

推荐答案



" Tron Thomas" < TR ********* @ verizon.net>在消息新闻中写道:a4 ************************** @ posting.google.c om ...> > c->运行时

动态内存管理器(以及大多数其他商业内存

"Tron Thomas" <tr*********@verizon.net> wrote in message news:a4**************************@posting.google.c om...> > The c-> runtime
dynamic memory manager (and most other commercial memory
管理器)存在framentaiton问题。这可能导致可用内存的低效使用,以及缓存命中效率低下。


是的。有些分配器比其他分配器做得更好。

性能关键应用程序的目标是最小化对新/删除的调用次数。


性能关键型应用程序的目标是尽量减少一切。

我认为你的意思是全球运营商new / delete。

我已经实现了一个内存池,它将合并内存块,当它们被释放时返回到池中。
managers) has issues with framentaiton. This can lead to inefficient use of
available memory, as well as cache-hit inefficiencies.
True. Some allocators do better than others.
The goal of performance-critical applications is to minimize the
number of calls to new/delete.
The goal of performance-critical applications is to minimize EVERYTHING.
I aasume you mean to the global operator new/delete.
I have implemented a memory pool that will coalesce blocks of memory
that are returned to the pool when they are freed up.




所以几乎每个默认分配器在那里。你的代码也是次优的

,因为它表现出众所周知的不良行为,在你寻找一个块的时候,在整个记忆中徘徊。

随机页面。此外,我在你的方法中看不到任何可以减少碎片的东西。


你可能想要谷歌周围的一些先前的关于这个的艺术。



So do almost every default allocator out there. Your code is also suboptimal
as it exhibits the well-known bad behavior of wandering all over memory touching
random pages when you are looking for a block. Further, I can''t see anything
in your approach that does anything to diminish fragmentation,

You might want to google around for some of the prior art on this.


对于我的应用程序我只在堆上分配特定类别的对象,所以我不喜欢我不相信我需要覆盖全球新的和删除运营商。


" Ron Natalie" < ro*@sensor.com>在留言新闻中写道:< 3f ********************* @ news.newshosting.com> ...
For my application I am only allocating a specific class of objects on
the heap, so I don''t believe that I woud need to override the global
new and delete operators.

"Ron Natalie" <ro*@sensor.com> wrote in message news:<3f*********************@news.newshosting.com >...
目标性能关键型应用程序是最小化一切。
我认为你的意思是全局运算符new / delete。
The goal of performance-critical applications is to minimize EVERYTHING.
I aasume you mean to the global operator new/delete.
我已经实现了一个内存池,它将合并内存块
当它们被释放时返回到池中。
所以几乎每个默认分配器都在那里。你的代码也是次优的
因为它表现出众所周知的不良行为,当你正在寻找一个块时,在整个记忆中徘徊触摸
随机页面。此外,我在你的方法中看不到任何可以减少碎片的事情,
I have implemented a memory pool that will coalesce blocks of memory
that are returned to the pool when they are freed up.
So do almost every default allocator out there. Your code is also suboptimal
as it exhibits the well-known bad behavior of wandering all over memory touching
random pages when you are looking for a block. Further, I can''t see anything
in your approach that does anything to diminish fragmentation,




我预计大多数分配器已经处理了合并,所以我很想知道我自己实施的是什么。

这就是我发布这篇文章的动机

你我可能想在谷歌周围寻找一些现有技术。



I expected that most allocator would already handle coalescing, so I
was really wondering what I was getting out of implementing it myself.
That''s what motivated me to make this post

You might want to google around for some of the prior art on this.




在尝试实施之前,我曾尝试使用谷歌进行研究。我

没有找到任何有用的东西。你有什么建议我搜索?



I tried using Google to research before doing this implementation. I
did not find anything useful. What would you suggest I search under?




Tron Thomas < TR ********* @ verizon.net>在消息新闻中写道:a4 ************************* @ posting.google.co m ...

"Tron Thomas" <tr*********@verizon.net> wrote in message news:a4*************************@posting.google.co m...

在执行此实现之前,我尝试使用Google进行研究。我没有找到任何有用的东西。你有什么建议我在下面搜索?

I tried using Google to research before doing this implementation. I
did not find anything useful. What would you suggest I search under?




大量的东西出现了
http://www.google.com/search?hl = en& i ... ion + algorithms


这篇关于防止内存碎片的文章就介绍到这了,希望我们推荐的答案对大家有所帮助,也希望大家多多支持IT屋!

查看全文
登录 关闭
扫码关注1秒登录
发送“验证码”获取 | 15天全站免登陆