WebKit Bugzilla
Attachment 361436 Details for
Bug 192389
: bmalloc uses more memory on iOS compared to macOS due to physical page size differences
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Updated patch responding to review comments
192389-7.patch (text/plain), 11.57 KB, created by
Michael Saboff
on 2019-02-07 13:32:57 PST
(
hide
)
Description:
Updated patch responding to review comments
Filename:
MIME Type:
Creator:
Michael Saboff
Created:
2019-02-07 13:32:57 PST
Size:
11.57 KB
patch
obsolete
>Index: Source/bmalloc/ChangeLog >=================================================================== >--- Source/bmalloc/ChangeLog (revision 241140) >+++ Source/bmalloc/ChangeLog (working copy) >@@ -1,3 +1,34 @@ >+2019-02-07 Michael Saboff <msaboff@apple.com> >+ >+ bmalloc uses more memory on iOS compared to macOS due to physical page size differences >+ https://bugs.webkit.org/show_bug.cgi?id=192389 >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ Changed small line allocations to be in smallPageSize "virtual page" multiples instead of physical >+ page size increments for sizes less than the physical page size. This required changing the small >+ page commit / decommit code to work in full physical page increments. For page classes that are >+ physical page size and larger, there isn't any functional change. >+ >+ When scavenging page classes smaller than the physical page size, we need to consider whether or >+ not the adjacent small pages on the same physical page are also free before decommitting that >+ containing page. When we need to commit more memory, we commit the whole page, and add any >+ adjacent virtual pages that were fully committed as well. 
>+ >+ * bmalloc/Chunk.h: >+ (bmalloc::forEachPage): >+ * bmalloc/Heap.cpp: >+ (bmalloc::Heap::initializeLineMetadata): >+ (bmalloc::Heap::initializePageMetadata): >+ (bmalloc::Heap::scavenge): >+ (bmalloc::__attribute__): >+ (bmalloc::Heap::commitSmallPagesInPhysicalPage): >+ (bmalloc::Heap::allocateSmallPage): >+ (bmalloc::Heap::allocateSmallBumpRangesByMetadata): >+ * bmalloc/Heap.h: >+ * bmalloc/SmallPage.h: >+ (bmalloc::SmallPage::refCount): >+ > 2019-01-18 Keith Miller <keith_miller@apple.com> > > gigacage slide should randomize both start and end >Index: Source/bmalloc/bmalloc/Chunk.h >=================================================================== >--- Source/bmalloc/bmalloc/Chunk.h (revision 240992) >+++ Source/bmalloc/bmalloc/Chunk.h (working copy) >@@ -50,6 +50,7 @@ public: > char* address(size_t offset); > SmallPage* page(size_t offset); > SmallLine* line(size_t offset); >+ size_t pageNumber(SmallPage*); > > char* bytes() { return reinterpret_cast<char*>(this); } > SmallLine* lines() { return &m_lines[0]; } >@@ -77,7 +78,8 @@ template<typename Function> void forEach > { > // We align to at least the page size so we can service aligned allocations > // at equal and smaller powers of two, and also so we can vmDeallocatePhysicalPages(). 
>- size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(pageSize, sizeof(Chunk)); >+ size_t firstPageOffset = max(pageSize, vmPageSize()); >+ size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(firstPageOffset, sizeof(Chunk)); > > Object begin(chunk, metadataSize); > Object end(chunk, chunkSize); >Index: Source/bmalloc/bmalloc/Heap.cpp >=================================================================== >--- Source/bmalloc/bmalloc/Heap.cpp (revision 240992) >+++ Source/bmalloc/bmalloc/Heap.cpp (working copy) >@@ -44,6 +44,8 @@ > > namespace bmalloc { > >+static_assert(isPowerOfTwo(smallPageSize), ""); >+ > Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&) > : m_kind(kind) > , m_vmPageSizePhysical(vmPageSizePhysical()) >@@ -93,7 +95,7 @@ size_t Heap::gigacageSize() > void Heap::initializeLineMetadata() > { > size_t sizeClassCount = bmalloc::sizeClass(smallLineSize); >- size_t smallLineCount = m_vmPageSizePhysical / smallLineSize; >+ size_t smallLineCount = smallPageSize / smallLineSize; > m_smallLineMetadata.grow(sizeClassCount * smallLineCount); > > for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) { >@@ -102,7 +104,7 @@ void Heap::initializeLineMetadata() > > size_t object = 0; > size_t line = 0; >- while (object < m_vmPageSizePhysical) { >+ while (object < smallPageSize) { > line = object / smallLineSize; > size_t leftover = object % smallLineSize; > >@@ -116,7 +118,7 @@ void Heap::initializeLineMetadata() > } > > // Don't allow the last object in a page to escape the page. 
>- if (object > m_vmPageSizePhysical) { >+ if (object > smallPageSize) { > BASSERT(pageMetadata[line].objectCount); > --pageMetadata[line].objectCount; > } >@@ -128,11 +130,18 @@ void Heap::initializePageMetadata() > auto computePageSize = [&](size_t sizeClass) { > size_t size = objectSize(sizeClass); > if (sizeClass < bmalloc::sizeClass(smallLineSize)) >- return m_vmPageSizePhysical; >+ return smallPageSize; > >- for (size_t pageSize = m_vmPageSizePhysical; >- pageSize < pageSizeMax; >- pageSize += m_vmPageSizePhysical) { >+ // We want power of 2 page sizes below physical page size and multiples of physical page size above that. >+ size_t pageSize = smallPageSize; >+ for (; pageSize < m_vmPageSizePhysical; pageSize *= 2) { >+ RELEASE_BASSERT(pageSize <= chunkSize / 2); >+ size_t waste = pageSize % size; >+ if (waste <= pageSize / pageSizeWasteFactor) >+ return pageSize; >+ } >+ >+ for (; pageSize < pageSizeMax; pageSize += m_vmPageSizePhysical) { > RELEASE_BASSERT(pageSize <= chunkSize / 2); > size_t waste = pageSize % size; > if (waste <= pageSize / pageSizeWasteFactor) >@@ -188,14 +197,17 @@ void Heap::scavenge(std::lock_guard<Mute > continue; > > size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]); >- size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize); >- m_freeableMemory -= decommitSize; >- m_footprint -= decommitSize; >- decommitter.addEager(page->begin()->begin(), pageSize); >- page->setHasPhysicalPages(false); >-#if ENABLE_PHYSICAL_PAGE_MAP >- m_physicalPageMap.decommit(page->begin()->begin(), pageSize); >+ if (pageSize >= m_vmPageSizePhysical) { >+ size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize); >+ m_freeableMemory -= decommitSize; >+ m_footprint -= decommitSize; >+ decommitter.addEager(page->begin()->begin(), pageSize); >+ page->setHasPhysicalPages(false); >+#if ENABLE_PHYSICAL_PAGE_MAP >+ m_physicalPageMap.decommit(page->begin()->begin(), pageSize); > #endif >+ } else >+ 
tryDecommitSmallPagesInPhysicalPage(lock, decommitter, page, pageSize); > } > } > } >@@ -267,6 +279,63 @@ void Heap::allocateSmallChunk(std::uniqu > m_freePages[pageClass].push(chunk); > } > >+void Heap::tryDecommitSmallPagesInPhysicalPage(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, SmallPage* smallPage, size_t pageSize) >+{ >+ Chunk* chunk = Chunk::get(smallPage); >+ >+ char* pageBegin = smallPage->begin()->begin(); >+ char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, pageBegin); >+ >+ // The first page in a physical page takes care of decommitting its physical neighbors >+ if (pageBegin != physicalPageBegin) >+ return; >+ >+ size_t beginPageOffset = chunk->offset(physicalPageBegin); >+ size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical; >+ >+ Object begin(chunk, beginPageOffset); >+ Object end(chunk, endPageOffset); >+ >+ for (auto it = begin; it + pageSize <= end; it = it + pageSize) { >+ if (it.page()->refCount(lock)) >+ return; >+ } >+ >+ size_t decommitSize = m_vmPageSizePhysical; >+ m_freeableMemory -= decommitSize; >+ m_footprint -= decommitSize; >+ >+ decommitter.addEager(physicalPageBegin, decommitSize); >+ >+ for (auto it = begin; it + pageSize <= end; it = it + pageSize) >+ it.page()->setHasPhysicalPages(false); >+#if ENABLE_PHYSICAL_PAGE_MAP >+ m_physicalPageMap.decommit(smallPage, decommitSize); >+#endif >+} >+ >+void Heap::commitSmallPagesInPhysicalPage(std::unique_lock<Mutex>&, SmallPage* page, size_t pageSize) >+{ >+ Chunk* chunk = Chunk::get(page); >+ >+ char* physicalPageBegin = roundDownToMultipleOf(m_vmPageSizePhysical, page->begin()->begin()); >+ >+ size_t beginPageOffset = chunk->offset(physicalPageBegin); >+ size_t endPageOffset = beginPageOffset + m_vmPageSizePhysical; >+ >+ Object begin(chunk, beginPageOffset); >+ Object end(chunk, endPageOffset); >+ >+ m_footprint += m_vmPageSizePhysical; >+ vmAllocatePhysicalPagesSloppy(physicalPageBegin, m_vmPageSizePhysical); >+ >+ for (auto it = begin; it + 
pageSize <= end; it = it + pageSize) >+ it.page()->setHasPhysicalPages(true); >+#if ENABLE_PHYSICAL_PAGE_MAP >+ m_physicalPageMap.commit(begin.page(), m_vmPageSizePhysical); >+#endif >+} >+ > void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass) > { > m_objectTypes.set(chunk, ObjectType::Large); >@@ -325,12 +394,15 @@ SmallPage* Heap::allocateSmallPage(std:: > m_freeableMemory -= physicalSize; > else { > m_scavenger->scheduleIfUnderMemoryPressure(pageSize); >- m_footprint += physicalSize; >- vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize); >- page->setHasPhysicalPages(true); >-#if ENABLE_PHYSICAL_PAGE_MAP >- m_physicalPageMap.commit(page->begin()->begin(), pageSize); >+ if (pageSize >= m_vmPageSizePhysical) { >+ m_footprint += physicalSize; >+ vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize); >+ page->setHasPhysicalPages(true); >+#if ENABLE_PHYSICAL_PAGE_MAP >+ m_physicalPageMap.commit(page->begin()->begin(), pageSize); > #endif >+ } else >+ commitSmallPagesInPhysicalPage(lock, page, pageSize); > } > > return page; >@@ -390,7 +462,7 @@ void Heap::allocateSmallBumpRangesByMeta > SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache); > SmallLine* lines = page->begin(); > BASSERT(page->hasFreeLines(lock)); >- size_t smallLineCount = m_vmPageSizePhysical / smallLineSize; >+ size_t smallLineCount = smallPageSize / smallLineSize; > LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount]; > > auto findSmallBumpRange = [&](size_t& lineNumber) { >Index: Source/bmalloc/bmalloc/Heap.h >=================================================================== >--- Source/bmalloc/bmalloc/Heap.h (revision 240992) >+++ Source/bmalloc/bmalloc/Heap.h (working copy) >@@ -120,6 +120,9 @@ private: > SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&); > void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&); > >+ void 
tryDecommitSmallPagesInPhysicalPage(std::lock_guard<Mutex>&, BulkDecommit& decommitter, SmallPage*, size_t pageSize); >+ void commitSmallPagesInPhysicalPage(std::unique_lock<Mutex>&, SmallPage*, size_t pageSize); >+ > void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass); > void deallocateSmallChunk(Chunk*, size_t pageClass); > >Index: Source/bmalloc/bmalloc/SmallPage.h >=================================================================== >--- Source/bmalloc/bmalloc/SmallPage.h (revision 240992) >+++ Source/bmalloc/bmalloc/SmallPage.h (working copy) >@@ -41,7 +41,8 @@ public: > void ref(std::unique_lock<Mutex>&); > bool deref(std::unique_lock<Mutex>&); > unsigned refCount(std::unique_lock<Mutex>&) { return m_refCount; } >- >+ unsigned refCount(std::lock_guard<Mutex>&) { return m_refCount; } >+ > size_t sizeClass() { return m_sizeClass; } > void setSizeClass(size_t sizeClass) { m_sizeClass = sizeClass; } >
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Flags:
ggaren
:
review+
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 192389
:
356564
|
357062
|
361127
|
361133
|
361315
|
361348
|
361362
| 361436