ns-3 Direct Code Execution
API
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends Macros Pages
kingsley-alloc.cc
Go to the documentation of this file.
1 #include "kingsley-alloc.h"
2 #include <string.h>
3 #include <sys/mman.h>
4 #include <stdlib.h>
5 #include "ns3/assert.h"
6 #include "ns3/log.h"
7 
9 
#ifdef HAVE_VALGRIND_H
# include "valgrind/valgrind.h"
# include "valgrind/memcheck.h"
// Describe our custom allocator to valgrind so memcheck can track these
// blocks like regular malloc/free allocations (leaks, use-after-free).
# define REPORT_MALLOC(buffer, size) \
  VALGRIND_MALLOCLIKE_BLOCK (buffer,size, 0, 0)
# define REPORT_FREE(buffer) \
  VALGRIND_FREELIKE_BLOCK (buffer, 0)
// Toggle valgrind's "addressable but undefined" tracking on a byte range.
# define MARK_DEFINED(buffer, size) \
  VALGRIND_MAKE_MEM_DEFINED (buffer, size)
# define MARK_UNDEFINED(buffer, size) \
  VALGRIND_MAKE_MEM_UNDEFINED (buffer, size)
#else
// Without valgrind headers the instrumentation hooks compile away to nothing.
# define REPORT_MALLOC(buffer, size)
# define REPORT_FREE(buffer)
# define MARK_DEFINED(buffer, size)
# define MARK_UNDEFINED(buffer, size)
#endif
27 
28 
30  : m_defaultMmapSize (1 << 15)
31 {
32  NS_LOG_FUNCTION (this);
33  memset (m_buckets, 0, sizeof(m_buckets));
34 }
36 {
37  NS_LOG_FUNCTION (this);
38  // return;
39  for (std::list<struct KingsleyAlloc::MmapChunk>::iterator i = m_chunks.begin ();
40  i != m_chunks.end (); ++i)
41  {
42  if (i->copy)
43  {
44  // ok, this means that _our_ buffer is not the
45  // original mmap buffer which means that we were
46  // cloned once so, we need to free our local
47  // buffer.
48  free (i->copy);
49 
50  if (i->copy == i->mmap->current)
51  {
52  // Current must be nullify because we the next switch of context do not need to save our heap.
53  i->mmap->current = 0;
54  }
55  i->copy = 0;
56  }
57  i->mmap->refcount--;
58  if (i->mmap->refcount == 0)
59  {
60  // we are the last to release this chunk.
61  // so, release the mmaped data.
62  MmapFree (i->mmap->buffer, i->mmap->size);
63  delete i->mmap;
64  }
65  }
66  m_chunks.clear ();
67 }
68 // Call me only from my context
69 void
71 {
72  NS_LOG_FUNCTION (this);
73  for (std::list<struct KingsleyAlloc::MmapChunk>::iterator i = m_chunks.begin ();
74  i != m_chunks.end (); ++i)
75  {
76  if (i->copy == i->mmap->current)
77  {
78  // Current must be nullify because we the next switch of context do not need to save our heap.
79  i->mmap->current = 0;
80  }
81  }
82 }
85 {
86  NS_LOG_FUNCTION (this << "begin");
87  KingsleyAlloc *clone = new KingsleyAlloc ();
88  *clone->m_buckets = *m_buckets;
89  for (std::list<struct KingsleyAlloc::MmapChunk>::iterator i = m_chunks.begin ();
90  i != m_chunks.end (); ++i)
91  {
92  struct KingsleyAlloc::MmapChunk chunk = *i;
93  chunk.mmap->refcount++;
94  if ((chunk.mmap->refcount == 2)&&(0 == chunk.copy))
95  {
96  // this is the first clone of this heap so, we first
97  // create buffer copies for ourselves
98  chunk.mmap->current = i->copy = (uint8_t *)malloc (chunk.mmap->size);
99  }
100  // now, we create a buffer copy for the clone
101  struct KingsleyAlloc::MmapChunk chunkClone = chunk;
102  chunkClone.copy = (uint8_t *)malloc (chunkClone.mmap->size);
103  // Save the heap in the clone copy memory
104  memcpy (chunkClone.copy, chunk.mmap->buffer, chunk.mmap->size);
105  clone->m_chunks.push_back (chunkClone);
106  }
107  NS_LOG_FUNCTION (this << "end");
108  return clone;
109 }
110 
111 void
113 {
114  NS_LOG_FUNCTION (this);
115  for (std::list<struct KingsleyAlloc::MmapChunk>::const_iterator i = m_chunks.begin ();
116  i != m_chunks.end (); ++i)
117  {
118  struct KingsleyAlloc::MmapChunk chunk = *i;
119 
120  // save the previous user's heap if necessary
121  if (chunk.mmap->current && (chunk.mmap->current != chunk.mmap->buffer))
122  {
123  memcpy (chunk.mmap->current, chunk.mmap->buffer, chunk.mmap->size);
124  }
125 
126  // swap in our own copy of the heap if necessary
127  if (chunk.copy && (chunk.mmap->buffer != chunk.copy))
128  {
129  memcpy (chunk.mmap->buffer, chunk.copy, chunk.mmap->size);
130  }
131  // and, now, remember that _we_ own the heap
132  chunk.mmap->current = chunk.copy;
133  }
134 }
135 
136 void
137 KingsleyAlloc::MmapFree (uint8_t *buffer, uint32_t size)
138 {
139  NS_LOG_FUNCTION (this << (void*)buffer << size);
140  int status;
141  status = ::munmap (buffer, size);
142  NS_ASSERT_MSG (status == 0, "Unable to release mmaped buffer");
143 }
144 void
146 {
147  NS_LOG_FUNCTION (this << size);
148  struct Mmap *mmap_struct = new Mmap ();
149  mmap_struct->refcount = 1;
150  mmap_struct->size = size;
151  mmap_struct->buffer = (uint8_t*)::mmap (0, size, PROT_READ | PROT_WRITE,
152  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
153  NS_ASSERT_MSG (mmap_struct->buffer != MAP_FAILED, "Unable to mmap memory buffer");
154  mmap_struct->current = mmap_struct->buffer;
155  struct MmapChunk chunk;
156  chunk.mmap = mmap_struct;
157  chunk.brk = 0;
158  chunk.copy = 0; // no clone yet, no copy yet.
159 
160  m_chunks.push_front (chunk);
161  NS_LOG_DEBUG ("mmap alloced=" << size << " at=" << (void*)mmap_struct->buffer);
162  MARK_UNDEFINED (mmap_struct->buffer, size);
163 }
164 
165 uint8_t *
166 KingsleyAlloc::Brk (uint32_t needed)
167 {
168  NS_LOG_FUNCTION (this << needed);
169  for (std::list<struct KingsleyAlloc::MmapChunk>::iterator i = m_chunks.begin ();
170  i != m_chunks.end (); ++i)
171  {
172  NS_ASSERT (i->mmap->size >= i->brk);
173  if (i->mmap->size - i->brk >= needed)
174  {
175  uint8_t *buffer = i->mmap->buffer + i->brk;
176  i->brk += needed;
177  NS_LOG_DEBUG ("brk: needed=" << needed << ", left=" << i->mmap->size - i->brk);
178  return buffer;
179  }
180  }
181  NS_ASSERT_MSG (needed <= m_defaultMmapSize, needed << " " << m_defaultMmapSize);
183  return Brk (needed);
184 }
185 uint8_t
187 {
188  NS_LOG_FUNCTION (this << sz);
189  uint8_t bucket = 0;
190  uint32_t size = sz;
191  size--;
192  while (size > 7)
193  {
194  size >>= 1;
195  bucket++;
196  }
197  NS_ASSERT (bucket < 32);
198  NS_LOG_DEBUG ("size=" << sz << ", bucket=" << (uint32_t)bucket << ", size=" <<
199  BucketToSize (bucket));
200  return bucket;
201 }
202 uint32_t
204 {
205  uint32_t size = (1 << (bucket + 3));
206  return size;
207 }
208 
209 uint8_t *
210 KingsleyAlloc::Malloc (uint32_t size)
211 {
212  NS_LOG_FUNCTION (this << size);
213  if (size < m_defaultMmapSize)
214  {
215  uint8_t bucket = SizeToBucket (size);
216  if (m_buckets[bucket] == 0)
217  {
218  struct Available *avail = (struct Available *)Brk (BucketToSize (bucket));
219  MARK_DEFINED (avail, sizeof(void*));
220  avail->next = 0;
221  MARK_UNDEFINED (avail, sizeof(void*));
222  m_buckets[bucket] = avail;
223  }
224  // fast path.
225  struct Available *avail = m_buckets[bucket];
226  MARK_DEFINED (avail, sizeof(void*));
227  m_buckets[bucket] = avail->next;
228  MARK_UNDEFINED (avail, sizeof(void*));
229  REPORT_MALLOC (avail, size);
230  return (uint8_t*)avail;
231  }
232  else
233  {
234  MmapAlloc (size);
235  uint8_t *buffer = Brk (size);
236  REPORT_MALLOC (buffer, size);
237  return buffer;
238  }
239 }
240 void
241 KingsleyAlloc::Free (uint8_t *buffer, uint32_t size)
242 {
243  NS_LOG_FUNCTION (this << (void*)buffer << size);
244  if (size < m_defaultMmapSize)
245  {
246  // return to bucket list.
247  uint8_t bucket = SizeToBucket (size);
248  struct Available *avail = (struct Available *)buffer;
249  avail->next = m_buckets[bucket];
250  m_buckets[bucket] = avail;
251  REPORT_FREE (buffer);
252  }
253  else
254  {
255  for (std::list<struct KingsleyAlloc::MmapChunk>::iterator i = m_chunks.begin ();
256  i != m_chunks.end (); ++i)
257  {
258  if (i->mmap->buffer == buffer && i->mmap->size == size)
259  {
260  REPORT_FREE (buffer);
261  MmapFree (buffer, size);
262  m_chunks.erase (i);
263  return;
264  }
265  }
266  // this should never happen but it happens in case of a double-free
267  REPORT_FREE (buffer);
268  }
269 }
270 uint8_t *
271 KingsleyAlloc::Realloc (uint8_t *oldBuffer, uint32_t oldSize, uint32_t newSize)
272 {
273  NS_LOG_FUNCTION (this << (void*)oldBuffer << oldSize << newSize);
274  if (newSize < oldSize)
275  {
276  return oldBuffer;
277  }
278  uint8_t *newBuffer = Malloc (newSize);
279  memcpy (newBuffer, oldBuffer, oldSize);
280  Free (oldBuffer, oldSize);
281  return newBuffer;
282 }