GCC Code Coverage Report

File:      /home/iojs/build/workspace/node-test-commit-linux-coverage-daily/nodes/benchmark/out/../src/large_pages/node_large_page.cc
Directory: ../
Date:      2021-02-11 04:11:15

              Exec   Total   Coverage
Lines:          83     104     79.8 %
Branches:       29      66     43.9 %

Line   Exec  Source
   1         // Copyright (C) 2018 Intel Corporation
   2         //
   3         // Permission is hereby granted, free of charge, to any person obtaining a copy
   4         // of this software and associated documentation files (the "Software"),
   5         // to deal in the Software without restriction, including without limitation
   6         // the rights to use, copy, modify, merge, publish, distribute, sublicense,
   7         // and/or sell copies of the Software, and to permit persons to whom
   8         // the Software is furnished to do so, subject to the following conditions:
   9         //
  10         // The above copyright notice and this permission notice shall be included
  11         // in all copies or substantial portions of the Software.
  12         //
  13         // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
  14         // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  15         // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  16         // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES
  17         // OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  18         // ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
  19         // OR OTHER DEALINGS IN THE SOFTWARE.
  20         //
  21         // SPDX-License-Identifier: MIT
  22
  23         // The functions in this file map the .text section of Node.js into 2MB pages.
  24         // They perform the following steps:
  25         //
  26         // 1: Find the Node.js binary's `.text` section in memory. This is done below in
  27         //    `FindNodeTextRegion`. It is accomplished in a platform-specific way. On
  28         //    Linux and FreeBSD, `dl_iterate_phdr(3)` is used. When the region is found,
  29         //    it is "trimmed" as follows:
  30         //    * Modify the start to point to the very beginning of the Node.js `.text`
  31         //      section (from symbol `__node_text_start` declared in node_text_start.S).
  32         //    * Possibly modify the end to account for the `lpstub` section which
  33         //      contains `MoveTextRegionToLargePages`, the function we do not wish to
  34         //      move (see below).
  35         //    * Align the address of the start to its nearest higher large page
  36         //      boundary.
  37         //    * Align the address of the end to its nearest lower large page boundary.
  38         //
  39         // 2: Move the text region to large pages. This is done below in
  40         //    `MoveTextRegionToLargePages`. We need to be very careful:
  41         //    a) `MoveTextRegionToLargePages` itself should not be moved.
  42         //       We use gcc attributes
  43         //       (__section__) to put it outside the `.text` section,
  44         //       (__aligned__) to align it at the 2M boundary, and
  45         //       (__noinline__) to not inline this function.
  46         //    b) `MoveTextRegionToLargePages` should not call any function(s) that might
  47         //       be moved.
  48         //    To move the .text section, perform the following steps:
  49         //      * Map a new, temporary area and copy the original code there.
  50         //      * Use mmap using the start address with MAP_FIXED so we get exactly the
  51         //        same virtual address (except on OSX). On platforms other than Linux,
  52         //        use mmap flags to request hugepages.
  53         //      * On Linux use madvise with MADV_HUGEPAGE to use anonymous 2MB pages.
  54         //      * If successful copy the code to the newly mapped area and protect it to
  55         //        be readable and executable.
  56         //      * Unmap the temporary area.
  57
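
The remapping steps listed above can be tried out in isolation. The following standalone sketch (an illustration only, not the implementation in this file) applies the same back-up / remap-with-MAP_FIXED / madvise(MADV_HUGEPAGE) / restore / mprotect sequence to a dummy anonymous region instead of the live `.text` section, assumes Linux, and keeps error handling to bare early returns:

#include <sys/mman.h>
#include <cstdio>
#include <cstring>

#ifndef MADV_HUGEPAGE
#define MADV_HUGEPAGE 14  // same numeric constant the file itself uses below
#endif

int main() {
  const size_t kHugePageSize = 2UL * 1024 * 1024;
  const size_t size = 2 * kHugePageSize;

  // Stand-in for the text region (a real caller would use the aligned range
  // computed by FindNodeTextRegion; alignment is not enforced here).
  char* region = static_cast<char*>(mmap(nullptr, size, PROT_READ | PROT_WRITE,
                                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
  if (region == MAP_FAILED) return 1;
  memset(region, 0xC3, size);  // pretend these bytes are code

  // Step 1: copy the contents into a temporary backup mapping.
  void* backup = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (backup == MAP_FAILED) return 1;
  memcpy(backup, region, size);

  // Step 2: map anonymous memory over the original range. MAP_FIXED keeps the
  // virtual address; MADV_HUGEPAGE asks the kernel to back it with 2MB pages.
  void* remapped = mmap(region, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  if (remapped == MAP_FAILED) return 1;
  if (madvise(remapped, size, MADV_HUGEPAGE) == -1) return 1;

  // Step 3: restore the contents, then drop the write permission.
  memcpy(region, backup, size);
  if (mprotect(region, size, PROT_READ | PROT_EXEC) == -1) return 1;

  // Step 4: unmap the temporary area.
  munmap(backup, size);
  printf("remapped %zu bytes at %p\n", size, static_cast<void*>(region));
  return 0;
}
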
  58         #include "node_large_page.h"
  59
  60         #include <cerrno>   // NOLINT(build/include)
  61
  62         // Besides returning ENOTSUP at runtime we do nothing if this define is missing.
  63         #if defined(NODE_ENABLE_LARGE_CODE_PAGES) && NODE_ENABLE_LARGE_CODE_PAGES
  64         #include "debug_utils-inl.h"
  65
  66         #if defined(__linux__) || defined(__FreeBSD__)
  67         #if defined(__linux__)
  68         #ifndef _GNU_SOURCE
  69         #define _GNU_SOURCE
  70         #endif  // ifndef _GNU_SOURCE
  71         #elif defined(__FreeBSD__)
  72         #include "uv.h"  // uv_exepath
  73         #endif  // defined(__linux__)
  74         #include <link.h>
  75         #endif  // defined(__linux__) || defined(__FreeBSD__)
  76
  77         #include <sys/types.h>
  78         #include <sys/mman.h>
  79         #if defined(__FreeBSD__)
  80         #include <sys/sysctl.h>
  81         #elif defined(__APPLE__)
  82         #include <mach/vm_map.h>
  83         #endif
  84
  85         #include <climits>  // PATH_MAX
  86         #include <cstdlib>
  87         #include <cstdint>
  88         #include <cstring>
  89         #include <string>
  90         #include <fstream>
  91
  92         #if defined(__linux__) || defined(__FreeBSD__)
  93         extern "C" {
  94         // This symbol must be declared weak because this file becomes part of all
  95         // Node.js targets (like node_mksnapshot, node_mkcodecache, and cctest) and
  96         // those files do not supply the symbol.
  97         extern char __attribute__((weak)) __node_text_start;
  98         extern char __start_lpstub;
  99         }  // extern "C"
 100         #endif  // defined(__linux__) || defined(__FreeBSD__)
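
A note on the weak declaration above: if no translation unit in the final link provides the symbol, its address resolves to null instead of producing a link error, which is why targets built without node_text_start.S still link. A minimal illustration of that behaviour (the symbol name here is made up for the example):

#include <cstdio>

// Hypothetical weak symbol; nothing in this example program defines it.
extern "C" char __attribute__((weak)) optional_marker_symbol;

int main() {
  if (&optional_marker_symbol == nullptr)
    printf("symbol was not supplied by any object in this link\n");
  else
    printf("symbol lives at %p\n", static_cast<void*>(&optional_marker_symbol));
  return 0;
}
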
 101
 102         #endif  // defined(NODE_ENABLE_LARGE_CODE_PAGES) && NODE_ENABLE_LARGE_CODE_PAGES
 103         namespace node {
 104         #if defined(NODE_ENABLE_LARGE_CODE_PAGES) && NODE_ENABLE_LARGE_CODE_PAGES
 105
 106         namespace {
 107
 108         struct text_region {
 109           char* from = nullptr;
 110           char* to = nullptr;
 111           bool found_text_region = false;
 112         };
 113
 114         static const size_t hps = 2L * 1024 * 1024;
 115
 116         template <typename... Args>
 117      4  inline void Debug(std::string fmt, Args&&... args) {
 118      8    node::Debug(&per_process::enabled_debug_list,
 119                       DebugCategory::HUGEPAGES,
 120                       (std::string("Hugepages info: ") + fmt).c_str(),
 121                       std::forward<Args>(args)...);
 122      4  }
 123
 124         inline void PrintWarning(const char* warn) {
 125           fprintf(stderr, "Hugepages WARNING: %s\n", warn);
 126         }
 127
 128         inline void PrintSystemError(int error) {
 129           PrintWarning(strerror(error));
 130         }
 131
 132      1  inline uintptr_t hugepage_align_up(uintptr_t addr) {
 133      1    return (((addr) + (hps) - 1) & ~((hps) - 1));
 134         }
 135
 136      1  inline uintptr_t hugepage_align_down(uintptr_t addr) {
 137      1    return ((addr) & ~((hps) - 1));
 138         }
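
To make the bit-mask arithmetic above concrete, here is the same computation as compile-time checks (`hps` is 2 MiB, i.e. 0x200000): rounding up moves an address to the next 2 MiB boundary unless it is already aligned, and rounding down clears the low 21 bits.

#include <cstdint>

constexpr uintptr_t kHps = 2UL * 1024 * 1024;  // 0x200000, mirrors `hps` above
constexpr uintptr_t AlignUp(uintptr_t a)   { return (a + kHps - 1) & ~(kHps - 1); }
constexpr uintptr_t AlignDown(uintptr_t a) { return a & ~(kHps - 1); }

// 0x00600001 sits just past the boundary at 0x00600000.
static_assert(AlignUp(0x00600001) == 0x00800000, "rounds up to the next boundary");
static_assert(AlignDown(0x00600001) == 0x00600000, "rounds down to the previous one");
static_assert(AlignUp(0x00800000) == 0x00800000, "an aligned address is unchanged");
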
 139
 140         #if defined(__linux__) || defined(__FreeBSD__)
 141         #if defined(__FreeBSD__)
 142         #ifndef ElfW
 143         #define ElfW(name) Elf_##name
 144         #endif  // ifndef ElfW
 145         #endif  // defined(__FreeBSD__)
 146
 147      2  struct dl_iterate_params {
 148           uintptr_t start = 0;
 149           uintptr_t end = 0;
 150           uintptr_t reference_sym = reinterpret_cast<uintptr_t>(&__node_text_start);
 151           std::string exename;
 152         };
 153
 154      1  int FindMapping(struct dl_phdr_info* info, size_t, void* data) {
 155      1    auto dl_params = static_cast<dl_iterate_params*>(data);
 156      1    if (dl_params->exename == std::string(info->dlpi_name)) {
 157      3      for (int idx = 0; idx < info->dlpi_phnum; idx++) {
 158      3        const ElfW(Phdr)* phdr = &info->dlpi_phdr[idx];
 159      3        if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_X)) {
 160      1          uintptr_t start = info->dlpi_addr + phdr->p_vaddr;
 161      1          uintptr_t end = start + phdr->p_memsz;
 162
 163      2          if (dl_params->reference_sym >= start &&
 164      1              dl_params->reference_sym <= end) {
 165      1            dl_params->start = start;
 166      1            dl_params->end = end;
 167      1            return 1;
 168                 }
 169               }
 170             }
 171           }
 172           return 0;
 173         }
 174         #endif  // defined(__linux__) || defined(__FreeBSD__)
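
For readers unfamiliar with `dl_iterate_phdr(3)`: the callback above is invoked once per loaded object, and the executable `PT_LOAD` segment it selects is the one containing `__node_text_start`. A small standalone illustration of the same traversal, which simply prints every executable load segment (Linux; names other than the libc entry points are made up for the example):

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <link.h>
#include <cstddef>
#include <cstdio>

static int PrintExecSegments(struct dl_phdr_info* info, size_t, void*) {
  for (int i = 0; i < info->dlpi_phnum; i++) {
    const ElfW(Phdr)* phdr = &info->dlpi_phdr[i];
    if (phdr->p_type == PT_LOAD && (phdr->p_flags & PF_X)) {
      // The main executable is reported with an empty dlpi_name on Linux,
      // which is why FindMapping compares against exename above.
      printf("%s: executable segment at %p, %zu bytes\n",
             info->dlpi_name[0] != '\0' ? info->dlpi_name : "[main executable]",
             reinterpret_cast<void*>(info->dlpi_addr + phdr->p_vaddr),
             static_cast<size_t>(phdr->p_memsz));
    }
  }
  return 0;  // returning 0 keeps iterating over all loaded objects
}

int main() {
  dl_iterate_phdr(PrintExecSegments, nullptr);
  return 0;
}
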
 175
 176      1  struct text_region FindNodeTextRegion() {
 177      1    struct text_region nregion;
 178         #if defined(__linux__) || defined(__FreeBSD__)
 179      2    dl_iterate_params dl_params;
 180      1    uintptr_t lpstub_start = reinterpret_cast<uintptr_t>(&__start_lpstub);
 181
 182         #if defined(__FreeBSD__)
 183           // On FreeBSD we need the name of the binary, because `dl_iterate_phdr` does
 184           // not pass in an empty string as the `dlpi_name` of the binary but rather its
 185           // absolute path.
 186           {
 187             char selfexe[PATH_MAX];
 188             size_t count = sizeof(selfexe);
 189             if (uv_exepath(selfexe, &count))
 190               return nregion;
 191             dl_params.exename = std::string(selfexe, count);
 192           }
 193         #endif  // defined(__FreeBSD__)
 194
 195      1    if (dl_iterate_phdr(FindMapping, &dl_params) == 1) {
 196      2      Debug("start: %p - sym: %p - end: %p\n",
 197      2            reinterpret_cast<void*>(dl_params.start),
 198      2            reinterpret_cast<void*>(dl_params.reference_sym),
 199      3            reinterpret_cast<void*>(dl_params.end));
 200
 201      1      dl_params.start = dl_params.reference_sym;
 202      1      if (lpstub_start > dl_params.start && lpstub_start <= dl_params.end) {
 203      2        Debug("Trimming end for lpstub: %p\n",
 204      3              reinterpret_cast<void*>(lpstub_start));
 205      1        dl_params.end = lpstub_start;
 206             }
 207
 208      1      if (dl_params.start < dl_params.end) {
 209      1        char* from = reinterpret_cast<char*>(hugepage_align_up(dl_params.start));
 210      1        char* to = reinterpret_cast<char*>(hugepage_align_down(dl_params.end));
 211      1        Debug("Aligned range is %p - %p\n", from, to);
 212      1        if (from < to) {
 213      1          size_t pagecount = (to - from) / hps;
 214      1          if (pagecount > 0) {
 215      1            nregion.found_text_region = true;
 216      1            nregion.from = from;
 217      1            nregion.to = to;
 218                 }
 219               }
 220             }
 221           }
 222         #elif defined(__APPLE__)
 223           struct vm_region_submap_info_64 map;
 224           mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
 225           vm_address_t addr = 0UL;
 226           vm_size_t size = 0;
 227           natural_t depth = 1;
 228
 229           while (true) {
 230             if (vm_region_recurse_64(mach_task_self(), &addr, &size, &depth,
 231                                      reinterpret_cast<vm_region_info_64_t>(&map),
 232                                      &count) != KERN_SUCCESS) {
 233               break;
 234             }
 235
 236             if (map.is_submap) {
 237               depth++;
 238             } else {
 239               char* start = reinterpret_cast<char*>(hugepage_align_up(addr));
 240               char* end = reinterpret_cast<char*>(hugepage_align_down(addr+size));
 241
 242               if (end > start && (map.protection & VM_PROT_READ) != 0 &&
 243                   (map.protection & VM_PROT_EXECUTE) != 0) {
 244                 nregion.found_text_region = true;
 245                 nregion.from = start;
 246                 nregion.to = end;
 247                 break;
 248               }
 249
 250               addr += size;
 251               size = 0;
 252             }
 253           }
 254         #endif
 255      1    Debug("Found %d huge pages\n", (nregion.to - nregion.from) / hps);
 256      2    return nregion;
 257         }
 258
 259         #if defined(__linux__)
 260      1  bool IsTransparentHugePagesEnabled() {
 261      2    std::ifstream ifs;
 262
 263           // File format reference:
 264           // https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/mm/huge_memory.c?id=13391c60da3308ed9980de0168f74cce6c62ac1d#n163
 265      1    ifs.open("/sys/kernel/mm/transparent_hugepage/enabled");
 266      1    if (!ifs) {
 267             PrintWarning("could not open /sys/kernel/mm/transparent_hugepage/enabled");
 268             return false;
 269           }
 270
 271      2    std::string always, madvise;
 272      1    if (ifs.is_open()) {
 273      1      ifs >> always >> madvise;
 274           }
 275      1    ifs.close();
 276
 277      1    return always == "[always]" || madvise == "[madvise]";
 278         }
 279         #elif defined(__FreeBSD__)
 280         bool IsSuperPagesEnabled() {
 281           // It is enabled by default on amd64.
 282           unsigned int super_pages = 0;
 283           size_t super_pages_length = sizeof(super_pages);
 284           return sysctlbyname("vm.pmap.pg_ps_enabled",
 285                               &super_pages,
 286                               &super_pages_length,
 287                               nullptr,
 288                               0) != -1 &&
 289                  super_pages >= 1;
 290         }
 291         #endif
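
The Linux check above relies on the format of /sys/kernel/mm/transparent_hugepage/enabled: a single line in which the active mode is wrapped in brackets, for example "always [madvise] never". A tiny standalone reproduction of the same tokenization, using an in-memory string in place of the sysfs file:

#include <iostream>
#include <sstream>
#include <string>

int main() {
  // Sample contents of the sysfs file; only the bracketed token is active.
  std::istringstream enabled("always [madvise] never");
  std::string always, madvise;
  enabled >> always >> madvise;  // always == "always", madvise == "[madvise]"
  const bool usable = always == "[always]" || madvise == "[madvise]";
  std::cout << std::boolalpha << usable << "\n";  // prints "true"
  return 0;
}
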
 292
 293         // Functions in this class must always be inlined because they must end up in
 294         // the `lpstub` section rather than the `.text` section.
 295         class MemoryMapPointer {
 296          public:
 297      1    FORCE_INLINE explicit MemoryMapPointer() {}
 298           FORCE_INLINE bool operator==(void* rhs) const { return mem_ == rhs; }
 299      5    FORCE_INLINE void* mem() const { return mem_; }
 300           MemoryMapPointer(const MemoryMapPointer&) = delete;
 301           MemoryMapPointer(MemoryMapPointer&&) = delete;
 302           void operator= (const MemoryMapPointer&) = delete;
 303           void operator= (const MemoryMapPointer&&) = delete;
 304           FORCE_INLINE void Reset(void* start,
 305                                   size_t size,
 306                                   int prot,
 307                                   int flags,
 308                                   int fd = -1,
 309                                   size_t offset = 0) {
 310      2      mem_ = mmap(start, size, prot, flags, fd, offset);
 311      2      size_ = size;
 312           }
 313           FORCE_INLINE void Reset() {
 314      1      mem_ = nullptr;
 315      1      size_ = 0;
 316           }
 317           FORCE_INLINE ~MemoryMapPointer() {
 318      3      if (mem_ == nullptr) return;
 319      1      if (mem_ == MAP_FAILED) return;
 320      1      if (munmap(mem_, size_) == 0) return;
 321             PrintSystemError(errno);
 322           }
 323
 324          private:
 325           size_t size_ = 0;
 326           void* mem_ = nullptr;
 327         };
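
The class above is a small RAII guard: every early exit in `MoveTextRegionToLargePages` below lets the destructor clean up a half-finished mapping, while the argument-less `Reset()` deliberately releases ownership of a mapping that must outlive the function. A simplified usage sketch of that pattern, using a cut-down stand-in class rather than `MemoryMapPointer` itself:

#include <sys/mman.h>
#include <cstdio>

class ScopedMap {  // cut-down stand-in for MemoryMapPointer
 public:
  void Reset(void* start, size_t size, int prot, int flags) {
    mem_ = mmap(start, size, prot, flags, -1, 0);
    size_ = size;
  }
  void Reset() { mem_ = nullptr; size_ = 0; }  // "commit": keep the mapping
  void* mem() const { return mem_; }
  ~ScopedMap() {
    if (mem_ != nullptr && mem_ != MAP_FAILED) munmap(mem_, size_);
  }
 private:
  void* mem_ = nullptr;
  size_t size_ = 0;
};

int main() {
  ScopedMap scratch;
  scratch.Reset(nullptr, 1 << 20, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS);
  if (scratch.mem() == MAP_FAILED) return 1;  // destructor: nothing to unmap
  // ... any early return here would unmap the region automatically ...
  scratch.Reset();  // success path: intentionally keep the mapping alive
  printf("mapping committed\n");
  return 0;
}
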
 328
 329         }  // End of anonymous namespace
 330
 331         int
 332         #if !defined(__APPLE__)
 333         __attribute__((__section__("lpstub")))
 334         #else
 335         __attribute__((__section__("__TEXT,__lpstub")))
 336         #endif
 337         __attribute__((__aligned__(hps)))
 338         __attribute__((__noinline__))
 339      1  MoveTextRegionToLargePages(const text_region& r) {
 340           MemoryMapPointer nmem;
 341           MemoryMapPointer tmem;
 342      1    void* start = r.from;
 343      1    size_t size = r.to - r.from;
 344
 345           // Allocate a temporary region and back up the code we will re-map.
 346           nmem.Reset(nullptr, size,
 347                      PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS);
 348      1    if (nmem.mem() == MAP_FAILED) goto fail;
 349      2    memcpy(nmem.mem(), r.from, size);
 350
 351         #if defined(__linux__)
 352         // We already know the original mapping is r-xp
 353         // (PROT_READ, PROT_EXEC, MAP_PRIVATE).
 354         // We want PROT_WRITE because we are going to write into it,
 355         // and we want it at the same address, so we use MAP_FIXED.
 356           tmem.Reset(start, size,
 357                      PROT_READ | PROT_WRITE | PROT_EXEC,
 358                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED);
 359      1    if (tmem.mem() == MAP_FAILED) goto fail;
 360      1    if (madvise(tmem.mem(), size, 14 /* MADV_HUGEPAGE */) == -1) goto fail;
 361      1    memcpy(start, nmem.mem(), size);
 362         #elif defined(__FreeBSD__)
 363           tmem.Reset(start, size,
 364                      PROT_READ | PROT_WRITE | PROT_EXEC,
 365                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED |
 366                      MAP_ALIGNED_SUPER);
 367           if (tmem.mem() == MAP_FAILED) goto fail;
 368           memcpy(start, nmem.mem(), size);
 369         #elif defined(__APPLE__)
 370           // There is not enough room to reserve the mapping close
 371           // to the region address, so we settle for giving a hint
 372           // without forcing the new address to be close to it.
 373           // We explicitly request all permissions since we plan
 374           // to write into it.
 375           tmem.Reset(start, size,
 376                      PROT_READ | PROT_WRITE | PROT_EXEC,
 377                      MAP_PRIVATE | MAP_ANONYMOUS,
 378                      VM_FLAGS_SUPERPAGE_SIZE_2MB);
 379           if (tmem.mem() == MAP_FAILED) goto fail;
 380           memcpy(tmem.mem(), nmem.mem(), size);
 381           if (mprotect(start, size, PROT_READ | PROT_WRITE | PROT_EXEC) == -1)
 382             goto fail;
 383           memcpy(start, tmem.mem(), size);
 384         #endif
 385
 386      1    if (mprotect(start, size, PROT_READ | PROT_EXEC) == -1) goto fail;
 387
 388           // We need not `munmap(tmem, size)` on success.
 389           tmem.Reset();
 390      1    return 0;
 391         fail:
 392           PrintSystemError(errno);
 393           return -1;
 394         }
 395         #endif  // defined(NODE_ENABLE_LARGE_CODE_PAGES) && NODE_ENABLE_LARGE_CODE_PAGES
 396
 397         // This is the primary API called from main.
 398      1  int MapStaticCodeToLargePages() {
 399         #if defined(NODE_ENABLE_LARGE_CODE_PAGES) && NODE_ENABLE_LARGE_CODE_PAGES
 400      1    bool have_thp = false;
 401         #if defined(__linux__)
 402      1    have_thp = IsTransparentHugePagesEnabled();
 403         #elif defined(__FreeBSD__)
 404           have_thp = IsSuperPagesEnabled();
 405         #elif defined(__APPLE__)
 406           // pse-36 flag is present in recent mac x64 products.
 407           have_thp = true;
 408         #endif
 409      1    if (!have_thp)
 410             return EACCES;
 411
 412      1    struct text_region r = FindNodeTextRegion();
 413      1    if (r.found_text_region == false)
 414             return ENOENT;
 415
 416      1    return MoveTextRegionToLargePages(r);
 417         #else
 418           return ENOTSUP;
 419         #endif
 420         }
 421
 422         const char* LargePagesError(int status) {
 423           switch (status) {
 424             case ENOTSUP:
 425               return "Mapping to large pages is not supported.";
 426
 427             case EACCES:
 428               return "Large pages are not enabled.";
 429
 430             case ENOENT:
 431               return "failed to find text region";
 432
 433             case -1:
 434               return "Mapping code to large pages failed. Reverting to default page "
 435                   "size.";
 436
 437             case 0:
 438               return "OK";
 439
 440             default:
 441               return "Unknown error";
 442           }
 443         }
 444
 445  14097  }  // namespace node
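
Finally, a sketch of how a caller might drive the two exported entry points, `MapStaticCodeToLargePages` and `LargePagesError`. This is a hypothetical driver for illustration, not the actual call site inside Node.js:

#include <cstdio>
#include "node_large_page.h"

int main() {
  const int status = node::MapStaticCodeToLargePages();
  if (status != 0) {
    // Any non-zero status means .text stays on regular-sized pages.
    fprintf(stderr, "large pages: %s\n", node::LargePagesError(status));
  }
  return 0;
}
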