summaryrefslogtreecommitdiff
path: root/src/space.cc
blob: c3d763c4fe96001e4424a5c5fdc5f816b17ff826 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
// Copyright 2011 Google Inc. All Rights Reserved.

#include "space.h"

#include <sys/mman.h>

#include "logging.h"
#include "mspace.h"
#include "scoped_ptr.h"
#include "utils.h"

namespace art {

// Creates and initializes a new Space covering [startup_size, maximum_size].
// Returns NULL on initialization failure; the scoped_ptr reclaims the
// partially-constructed space in that case.
Space* Space::Create(size_t startup_size, size_t maximum_size) {
  scoped_ptr<Space> space(new Space(startup_size, maximum_size));
  if (space->Init()) {
    return space.release();
  }
  return NULL;
}

// Places a dlmalloc mspace at base, committing half of startup_size up
// front and capping growth at maximum_size. Returns the mspace handle,
// or NULL on failure.
void* Space::CreateMallocSpace(void* base,
                               size_t startup_size,
                               size_t maximum_size) {
  // Clear errno so any failure message below reflects this call only.
  errno = 0;
  bool is_locked = false;
  // Commit only half of the startup size; the mspace grows on demand.
  size_t initial_commit = startup_size / 2;
  void* msp = create_contiguous_mspace_with_base(initial_commit, maximum_size,
                                                 is_locked, base);
  if (msp == NULL) {
    // There is no guarantee that errno has meaning when the call
    // fails, but it often does.
    PLOG(ERROR) << "create_contiguous_mspace_with_base failed";
    return NULL;
  }
  // Do not permit the heap grow past the starting size without our
  // intervention.
  mspace_set_max_allowed_footprint(msp, startup_size);
  return msp;
}

// Reserves the full maximum_size_ as anonymous memory and places the
// mspace inside it. Returns false (after undoing the reservation) if
// the sizes are inconsistent, the mapping fails, or mspace creation fails.
bool Space::Init() {
  // A space may never start larger than it is allowed to grow.
  if (startup_size_ > maximum_size_) {
    return false;
  }
  const size_t length = RoundUp(maximum_size_, kPageSize);
  const int prot = PROT_READ | PROT_WRITE;
  const int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  mem_map_.reset(MemMap::Map(length, prot, flags));
  if (mem_map_ == NULL) {
    PLOG(ERROR) << "mmap failed";
    return false;
  }
  base_ = mem_map_->GetAddress();
  limit_ = base_ + length;
  mspace_ = CreateMallocSpace(base_, startup_size_, maximum_size_);
  if (mspace_ == NULL) {
    // The mspace could not be placed in the reservation; release it.
    mem_map_->Unmap();
    return false;
  }
  return true;
}

// Nothing to do explicitly: the mem_map_ smart pointer tears down the
// reservation, and the mspace lives inside that mapping.
Space::~Space() {}

// Allocates num_bytes of zero-initialized storage from the mspace
// without raising the footprint limit; NULL when the space is exhausted.
Object* Space::AllocWithoutGrowth(size_t num_bytes) {
  void* raw = mspace_calloc(mspace_, 1, num_bytes);
  return reinterpret_cast<Object*>(raw);
}

// Like AllocWithoutGrowth, but temporarily lifts the footprint limit to
// maximum_size_ so the mspace may grow to satisfy the request, then
// clamps the limit back down to the footprint actually in use.
Object* Space::AllocWithGrowth(size_t num_bytes) {
  // Grow as much as possible within the mspace.
  mspace_set_max_allowed_footprint(mspace_, maximum_size_);
  // Try the allocation.
  Object* result = AllocWithoutGrowth(num_bytes);
  // Shrink back down as small as possible.
  mspace_set_max_allowed_footprint(mspace_, mspace_footprint(mspace_));
  // Return the new allocation or NULL.
  return result;
}

// Returns ptr's chunk to the mspace and reports its usable size so the
// caller can account for the bytes reclaimed.
size_t Space::Free(void* ptr) {
  DCHECK(ptr != NULL);
  // Capture the size before the chunk is handed back to the allocator.
  const size_t bytes_freed = mspace_usable_size(mspace_, ptr);
  mspace_free(mspace_, ptr);
  return bytes_freed;
}

// Total bytes consumed by obj's allocation, including the per-chunk
// allocator overhead.
size_t Space::AllocationSize(const Object* obj) {
  return kChunkOverhead + mspace_usable_size(mspace_, obj);
}

// Callback for mspace_walk_free_pages: advises the kernel that the
// page-aligned interior of the free range [start, end) is no longer
// needed, and accumulates the released length into *num_bytes (passed
// as void* to match the walker's callback signature).
// Fix: replace the C-style casts with reinterpret_cast, consistent with
// the named casts used everywhere else in this file.
void Space::DontNeed(void* start, void* end, void* num_bytes) {
  // madvise requires page-aligned addresses, so shrink the range inward
  // to whole pages.
  start = reinterpret_cast<void*>(RoundUp(reinterpret_cast<uintptr_t>(start),
                                          kPageSize));
  end = reinterpret_cast<void*>(RoundDown(reinterpret_cast<uintptr_t>(end),
                                          kPageSize));
  if (start >= end) {
    // The free range does not contain a single whole page.
    return;
  }
  size_t length = reinterpret_cast<byte*>(end) - reinterpret_cast<byte*>(start);
  int result = madvise(start, length, MADV_DONTNEED);
  if (result == -1) {
    PLOG(WARNING) << "madvise failed";
  } else {
    *reinterpret_cast<size_t*>(num_bytes) += length;
  }
}

// Returns unused memory to the system: trims the top of the mspace,
// then walks its free pages and madvises each one away via DontNeed.
void Space::Trim() {
  CHECK(mspace_ != NULL);
  mspace_trim(mspace_, 0);
  size_t released = 0;
  mspace_walk_free_pages(mspace_, DontNeed, &released);
}

// Current ceiling on the mspace's footprint, as last set via
// mspace_set_max_allowed_footprint.
size_t Space::MaxAllowedFootprint() {
  return mspace_max_allowed_footprint(mspace_);
}

// Grows the space to new_size. Not yet implemented: aborts if called.
void Space::Grow(size_t new_size) {
  UNIMPLEMENTED(FATAL);
}

}  // namespace art