package core

import (
	"errors"
	"sort"
	"sync"
	"unsafe"
)

var (
	ErrSlabNoCacheFound = errors.New("no slab cache matching request")
	ErrSlabTooLarge     = errors.New("requested size too large")
)

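// SlabAllocatorConfig holds the tunable parameters of the slab allocator.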
type SlabAllocatorConfig struct {
	MinCanarySize int
	Sizes         []int
}

// SlabOption is a configuration option for the slab allocator.
type SlabOption func(*SlabAllocatorConfig)

// WithSizes overrides the slab page sizes, which default to
// 64, 128, 256, 512, 1024 and 2048 bytes.
func WithSizes(sizes []int) SlabOption {
	return func(cfg *SlabAllocatorConfig) {
		cfg.Sizes = sizes
	}
}

// WithMinCanarySize sets the minimum canary size (default: 16 bytes).
func WithMinCanarySize(size int) SlabOption {
	return func(cfg *SlabAllocatorConfig) {
		cfg.MinCanarySize = size
	}
}
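
// Example (illustrative): construct an allocator with custom size classes
// and a smaller minimum canary:
//
//	a := NewSlabAllocator(WithSizes([]int{32, 64, 128}), WithMinCanarySize(8))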

// Memory allocator implementation
type slabAllocator struct {
	maxSlabSize int
	stats       *MemStats
	cfg         *SlabAllocatorConfig
	allocator   *pageAllocator
	slabs       []*slab
}

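// NewSlabAllocator creates a slab-based memory allocator with one slab per
// configured size class. Requests larger than the biggest size class are
// passed through to the underlying page allocator. It returns nil if the
// configured size list is empty.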
func NewSlabAllocator(options ...SlabOption) MemAllocator {
	cfg := &SlabAllocatorConfig{
		MinCanarySize: 16,
		Sizes:         []int{64, 128, 256, 512, 1024, 2048},
	}
	for _, o := range options {
		o(cfg)
	}
	sort.Ints(cfg.Sizes)

	if len(cfg.Sizes) == 0 {
		return nil
	}

	// Setup the allocator and initialize the slabs
	a := &slabAllocator{
		maxSlabSize: cfg.Sizes[len(cfg.Sizes)-1],
		stats:       &MemStats{},
		cfg:         cfg,
		slabs:       make([]*slab, 0, len(cfg.Sizes)),
		allocator: &pageAllocator{
			objects: make(map[int]*pageObject),
			stats:   &MemStats{},
		},
	}
	for _, size := range cfg.Sizes {
		s := &slab{
			objSize:   size,
			stats:     a.stats,
			allocator: a.allocator,
		}
		a.slabs = append(a.slabs, s)
	}

	return a
}

func (a *slabAllocator) Alloc(size int) ([]byte, error) {
	if size < 1 {
		return nil, ErrNullAlloc
	}

	// If the requested size (including the minimum canary) exceeds the
	// largest slab, fall back to the page allocator directly.
	requiredSlabSize := size + a.cfg.MinCanarySize
	if requiredSlabSize > a.maxSlabSize {
		return a.allocator.Alloc(size)
	}

	// Determine which slab to use depending on the size
	var s *slab
	for _, current := range a.slabs {
		if requiredSlabSize <= current.objSize {
			s = current
			break
		}
	}
	if s == nil {
		return nil, ErrSlabNoCacheFound
	}
	buf, err := s.alloc(size)
	if err != nil {
		return nil, err
	}

	// The slab already truncated the buffer to the requested size.
	return buf, nil
}
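
// For example, with the default configuration a request for 100 bytes needs
// 100+16 = 116 bytes including the minimum canary and is therefore served
// from the 128-byte slab.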

func (a *slabAllocator) Protect(buf []byte, readonly bool) error {
	// For the slab allocator, the data-slice is not identical to a memory page.
	// However, protection rules can only be applied to whole memory pages,
	// therefore protection of the data-slice is not supported by the slab
	// allocator.
	return nil
}

func (a *slabAllocator) Inner(buf []byte) []byte {
	if len(buf) == 0 {
		return nil
	}

	// If the buffer size is bigger than the largest slab, the buffer was
	// allocated directly by the page allocator, so delegate to it.
	size := len(buf) + a.cfg.MinCanarySize
	if size > a.maxSlabSize {
		return a.allocator.Inner(buf)
	}

	// Determine which slab to use depending on the size
	var s *slab
	for _, current := range a.slabs {
		if size <= current.objSize {
			s = current
			break
		}
	}
	if s == nil {
		Panic(ErrSlabNoCacheFound)
	}

	for _, c := range s.pages {
		if offset, contained := contains(c.buffer, buf); contained {
			return c.buffer[offset : offset+s.objSize]
		}
	}
	return nil
}

func (a *slabAllocator) Free(buf []byte) error {
	// Reject empty buffers up front; they cannot originate from this
	// allocator and would otherwise panic inside contains below.
	if len(buf) == 0 {
		return ErrNullAlloc
	}
	size := len(buf) + a.cfg.MinCanarySize

	// If the buffer size is bigger than the largest slab, just free
	// the memory.
	if size > a.maxSlabSize {
		return a.allocator.Free(buf)
	}

	// Determine which slab to use depending on the size
	var s *slab
	for _, current := range a.slabs {
		if size <= current.objSize {
			s = current
			break
		}
	}
	if s == nil {
		return ErrSlabNoCacheFound
	}

	return s.free(buf)
}

func (a *slabAllocator) Stats() *MemStats {
	return a.stats
}

// *** INTERNAL FUNCTIONS *** //

// Page implementation
type slabObject struct {
	offset int
	next   *slabObject
}

type slabPage struct {
	used   int
	head   *slabObject
	canary []byte
	buffer []byte
}

func newPage(page []byte, size int) *slabPage {
	if size > len(page) || size < 1 {
		Panic(ErrSlabTooLarge)
	}

	// Determine the number of objects fitting into the page
	count := len(page) / size

	// Init the Page meta-data
	c := &slabPage{
		head:   &slabObject{},
		canary: page[len(page)-size:],
		buffer: page,
	}

	// Use the last object to create a canary prototype
	if err := Scramble(c.canary); err != nil {
		Panic(err)
	}

	// Initialize the objects
	last := c.head
	offset := size
	for i := 1; i < count-1; i++ {
		obj := &slabObject{offset: offset}
		last.next = obj
		offset += size
		last = obj
	}

	return c
}
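
// The resulting page layout is: count-1 allocatable objects of size bytes
// each, linked through the free-list starting at head (offsets 0, size,
// 2*size, ...), while the last object slot holds the scrambled canary
// prototype that is copied into the unused tail of each allocated object.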

// Slab is a container for all Pages serving the same size
type slab struct {
	objSize   int
	stats     *MemStats
	allocator *pageAllocator
	pages     []*slabPage
	sync.Mutex
}

func (s *slab) alloc(size int) ([]byte, error) {
	s.Lock()
	defer s.Unlock()

	// Find the fullest Page that isn't completely filled
	var c *slabPage
	for _, current := range s.pages {
		if current.head != nil && (c == nil || current.used > c.used) {
			c = current
		}
	}

	// No Page available, create a new one
	if c == nil {
		// Use the page allocator to get a new guarded memory page
		page, err := s.allocator.Alloc(pageSize - s.objSize)
		if err != nil {
			s.stats.PageAllocErrors.Add(1)
			return nil, err
		}
		s.stats.PageAllocs.Store(s.allocator.stats.PageAllocs.Load())
		c = newPage(page, s.objSize)
		s.pages = append(s.pages, c)
	}

	// Remove the object from the free-list and increase the usage count
	obj := c.head
	c.head = c.head.next
	c.used++

	s.stats.ObjectAllocs.Add(1)
	data := getBufferPart(c.buffer, obj.offset, size)
	canary := getBufferPart(c.buffer, obj.offset+size, s.objSize-size)

	// Fill in the remaining bytes with canary
	Copy(canary, c.canary)

	return data, nil
}

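// contains reports whether obj points into buf and, if so, returns the byte
// offset of obj's first element relative to the start of buf. The check is
// done on the underlying pointers, so both slices must share the same backing
// memory for the result to be meaningful.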
func contains(buf, obj []byte) (int, bool) {
	bb := uintptr(unsafe.Pointer(&buf[0]))
	be := uintptr(unsafe.Pointer(&buf[len(buf)-1]))
	o := uintptr(unsafe.Pointer(&obj[0]))

	if bb <= be {
		return int(o - bb), bb <= o && o < be
	}
	return int(o - be), be <= o && o < bb
}

func (s *slab) free(buf []byte) error {
	s.Lock()
	defer s.Unlock()

	// Find the Page containing the object
	var c *slabPage
	var cidx, offset int
	for i, current := range s.pages {
		diff, contained := contains(current.buffer, buf)
		if contained {
			c = current
			cidx = i
			offset = diff
			break
		}
	}
	if c == nil {
		return ErrBufferNotOwnedByAllocator
	}

	s.stats.ObjectFrees.Add(1)

	// Wipe the buffer including the canary check
	if err := s.wipe(c, offset, len(buf)); err != nil {
		s.stats.ObjectFreeErrors.Add(1)
		return err
	}
	obj := &slabObject{
		offset: offset,
		next:   c.head,
	}
	c.head = obj
	c.used--

	// In case the Page is completely empty, we should remove it and
	// free the underlying memory
	if c.used == 0 {
		err := s.allocator.Free(c.buffer)
		s.stats.PageFrees.Store(s.allocator.stats.PageFrees.Load())
		if err != nil {
			s.stats.PageFreeErrors.Add(1)
			return err
		}

		s.pages = append(s.pages[:cidx], s.pages[cidx+1:]...)
	}

	return nil
}

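// wipe clears the freed object slot. It first wipes the size data bytes at
// offset, then verifies that the trailing canary bytes of the slot still match
// the page's canary prototype (detecting an overflow of the data region), and
// finally wipes the whole slot.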
func (s *slab) wipe(page *slabPage, offset, size int) error {
	canary := getBufferPart(page.buffer, -s.objSize, s.objSize)
	inner := getBufferPart(page.buffer, offset, s.objSize)
	data := getBufferPart(page.buffer, offset, size)

	// Wipe the data part of the object first
	Wipe(data)

	// Verify that the canary bytes trailing the data are still intact
	if !Equal(inner[len(data):], canary[:s.objSize-size]) {
		return ErrBufferOverflow
	}

	// Wipe the whole object slot including the canary bytes
	Wipe(inner)

	return nil
}
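
// Usage sketch (illustrative only, not part of the allocator): obtain a buffer
// from the slab allocator and release it again once it is no longer needed.
// The function name example is hypothetical.
//
//	func example() {
//		a := NewSlabAllocator()
//		buf, err := a.Alloc(100) // 100 + 16 canary bytes -> 128-byte slab
//		if err != nil {
//			Panic(err)
//		}
//		defer a.Free(buf)
//		copy(buf, "secret")
//	}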