WebKit Bugzilla
Attachment 361127 Details for
Bug 192389
: bmalloc uses more memory on iOS compared to macOS due to physical page size differences
Home
|
New
|
Browse
|
Search
|
[?]
|
Reports
|
Requests
|
Help
|
New Account
|
Log In
Remember
[x]
|
Forgot Password
Login:
[x]
[patch]
Patch addressing review issues
192389-2.patch (text/plain), 12.63 KB, created by
Michael Saboff
on 2019-02-04 16:44:19 PST
(
hide
)
Description:
Patch addressing review issues
Filename:
MIME Type:
Creator:
Michael Saboff
Created:
2019-02-04 16:44:19 PST
Size:
12.63 KB
patch
obsolete
>Index: Source/bmalloc/ChangeLog >=================================================================== >--- Source/bmalloc/ChangeLog (revision 240947) >+++ Source/bmalloc/ChangeLog (working copy) >@@ -1,3 +1,32 @@ >+2019-02-04 Michael Saboff <msaboff@apple.com> >+ >+ Need a short description (OOPS!). >+ Need the bug URL (OOPS!). >+ >+ Reviewed by NOBODY (OOPS!). >+ >+ Changed small line allocations to be in smallPageSize "virtual page" multiples instead of >+ physical page size increments. This required changing the small page commit / decommit code >+ to work in full physical page increments. For page classes that are physical page size and larger, >+ there isn't any functional change. When scavenging page classes smaller than the physical page size, >+ we need to consider whether or not the adjacent small pages on the same physical page are also >+ free before decommiting that containing page. When we need to commit more memory, we commit >+ the whole page, and add any adjacent virtual pages that were fully committed as well. 
>+ >+ * bmalloc/Chunk.h: >+ (bmalloc::forEachPage): >+ * bmalloc/Heap.cpp: >+ (bmalloc::Heap::initializeLineMetadata): >+ (bmalloc::Heap::initializePageMetadata): >+ (bmalloc::Heap::scavenge): >+ (bmalloc::Heap::tryDecommitSmallPage): >+ (bmalloc::Heap::commitSmallPage): >+ (bmalloc::Heap::allocateSmallPage): >+ (bmalloc::Heap::allocateSmallBumpRangesByMetadata): >+ * bmalloc/Heap.h: >+ * bmalloc/VMAllocate.h: >+ (bmalloc::physicalPageSizeSloppyRoundUp): >+ > 2019-01-18 Keith Miller <keith_miller@apple.com> > > gigacage slide should randomize both start and end >Index: Source/bmalloc/bmalloc/Chunk.h >=================================================================== >--- Source/bmalloc/bmalloc/Chunk.h (revision 240939) >+++ Source/bmalloc/bmalloc/Chunk.h (working copy) >@@ -77,7 +77,8 @@ template<typename Function> void forEach > { > // We align to at least the page size so we can service aligned allocations > // at equal and smaller powers of two, and also so we can vmDeallocatePhysicalPages(). 
>- size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(pageSize, sizeof(Chunk)); >+ size_t firstPageOffset = max(pageSize, vmPageSize()); >+ size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(firstPageOffset, sizeof(Chunk)); > > Object begin(chunk, metadataSize); > Object end(chunk, chunkSize); >Index: Source/bmalloc/bmalloc/Heap.cpp >=================================================================== >--- Source/bmalloc/bmalloc/Heap.cpp (revision 240939) >+++ Source/bmalloc/bmalloc/Heap.cpp (working copy) >@@ -44,6 +44,8 @@ > > namespace bmalloc { > >+static_assert(bmalloc::isPowerOfTwo(smallPageSize), ""); >+ > Heap::Heap(HeapKind kind, std::lock_guard<Mutex>&) > : m_kind(kind) > , m_vmPageSizePhysical(vmPageSizePhysical()) >@@ -93,7 +95,7 @@ size_t Heap::gigacageSize() > void Heap::initializeLineMetadata() > { > size_t sizeClassCount = bmalloc::sizeClass(smallLineSize); >- size_t smallLineCount = m_vmPageSizePhysical / smallLineSize; >+ size_t smallLineCount = smallPageSize / smallLineSize; > m_smallLineMetadata.grow(sizeClassCount * smallLineCount); > > for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) { >@@ -102,7 +104,7 @@ void Heap::initializeLineMetadata() > > size_t object = 0; > size_t line = 0; >- while (object < m_vmPageSizePhysical) { >+ while (object < smallPageSize) { > line = object / smallLineSize; > size_t leftover = object % smallLineSize; > >@@ -116,7 +118,7 @@ void Heap::initializeLineMetadata() > } > > // Don't allow the last object in a page to escape the page. 
>- if (object > m_vmPageSizePhysical) { >+ if (object > smallPageSize) { > BASSERT(pageMetadata[line].objectCount); > --pageMetadata[line].objectCount; > } >@@ -128,11 +130,10 @@ void Heap::initializePageMetadata() > auto computePageSize = [&](size_t sizeClass) { > size_t size = objectSize(sizeClass); > if (sizeClass < bmalloc::sizeClass(smallLineSize)) >- return m_vmPageSizePhysical; >+ return smallPageSize; > >- for (size_t pageSize = m_vmPageSizePhysical; >- pageSize < pageSizeMax; >- pageSize += m_vmPageSizePhysical) { >+ // We only want power of 2 pageSizes. Given that smallPageSize is a power of 2, we just double it when we want a larger size. >+ for (size_t pageSize = smallPageSize; pageSize < pageSizeMax; pageSize *= 2) { > RELEASE_BASSERT(pageSize <= chunkSize / 2); > size_t waste = pageSize % size; > if (waste <= pageSize / pageSizeWasteFactor) >@@ -188,14 +189,7 @@ void Heap::scavenge(std::lock_guard<Mute > continue; > > size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]); >- size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize); >- m_freeableMemory -= decommitSize; >- m_footprint -= decommitSize; >- decommitter.addEager(page->begin()->begin(), pageSize); >- page->setHasPhysicalPages(false); >-#if ENABLE_PHYSICAL_PAGE_MAP >- m_physicalPageMap.decommit(page->begin()->begin(), pageSize); >-#endif >+ tryDecommitSmallPage(lock, decommitter, page, pageSize); > } > } > } >@@ -267,6 +261,112 @@ void Heap::allocateSmallChunk(std::uniqu > m_freePages[pageClass].push(chunk); > } > >+void Heap::tryDecommitSmallPage(std::lock_guard<Mutex>&, BulkDecommit& decommitter, SmallPage* smallPage, size_t pageSize) >+{ >+ Chunk* chunk = Chunk::get(smallPage); >+ SmallPage* firstPageToDecommit { nullptr }; >+ size_t pagesToDecommit { 0 }; >+ >+ if (pageSize >= m_vmPageSizePhysical) { >+ firstPageToDecommit = smallPage; >+ pagesToDecommit = 1; >+ } else { >+ size_t matchingPageCount = 0; >+ >+ char* physicalPagesBegin = 
roundDownToMultipleOf(m_vmPageSizePhysical, smallPage->begin()->begin()); >+ unsigned smallPageCount = m_vmPageSizePhysical / pageSize; >+ >+ SmallPage* firstPageInRange = chunk->page(chunk->offset(physicalPagesBegin)); >+ SmallPage* lastPageInRange = chunk->page(chunk->offset(physicalPagesBegin + pageSize * (smallPageCount - 1))); >+ >+ for (auto* page : chunk->freePages()) { >+ if (page >= firstPageInRange && page <= lastPageInRange) { >+ matchingPageCount++; >+ if (matchingPageCount == smallPageCount) { >+ firstPageToDecommit = firstPageInRange; >+ pagesToDecommit = matchingPageCount; >+ break; >+ } >+ } >+ } >+ } >+ >+ if (!firstPageToDecommit || !pagesToDecommit) >+ return; >+ >+ char* firstPageBegin = firstPageToDecommit->begin()->begin(); >+ size_t decommitSize = physicalPageSizeSloppy(firstPageBegin, pagesToDecommit * pageSize); >+ m_freeableMemory -= decommitSize; >+ m_footprint -= decommitSize; >+ decommitter.addEager(firstPageBegin, decommitSize); >+ >+ size_t firstPageOffset = chunk->offset(firstPageBegin); >+ size_t lastPageOffset = firstPageOffset + pagesToDecommit * pageSize; >+ >+ Object begin(chunk, firstPageOffset); >+ Object end(chunk, lastPageOffset); >+ >+ for (auto it = begin; it + pageSize <= end; it = it + pageSize) >+ it.page()->setHasPhysicalPages(false); >+#if ENABLE_PHYSICAL_PAGE_MAP >+ m_physicalPageMap.decommit(firstPageToDecommit, decommitSize); >+#endif >+} >+ >+void Heap::commitSmallPage(std::unique_lock<Mutex>&, SmallPage* page, size_t pageSize) >+{ >+ Chunk* chunk = Chunk::get(page); >+ SmallPage* firstPageToCommit { nullptr }; >+ size_t pagesToCommit { 0 }; >+ >+ if (pageSize >= m_vmPageSizePhysical) { >+ firstPageToCommit = page; >+ pagesToCommit = 1; >+ } else { >+ char* physicalPagesBegin = roundDownToMultipleOf(m_vmPageSizePhysical, page->begin()->begin()); >+ unsigned smallPageCount = m_vmPageSizePhysical / pageSize; >+ >+ size_t firstPageOffset = chunk->offset(physicalPagesBegin); >+ size_t lastPageOffset = firstPageOffset 
+ smallPageCount * pageSize; >+ >+ Object begin(chunk, firstPageOffset); >+ Object end(chunk, lastPageOffset); >+ >+ for (auto it = begin; it + pageSize <= end; it = it + pageSize) { >+ if (!firstPageToCommit) { >+ if (!it.page()->hasPhysicalPages()) { >+ firstPageToCommit = it.page(); >+ pagesToCommit = 1; >+ } >+ } else if (!it.page()->hasPhysicalPages()) >+ pagesToCommit++; >+ else >+ break; >+ } >+ >+ BASSERT(firstPageToCommit && pagesToCommit); >+ } >+ >+ char* firstPageBegin = firstPageToCommit->begin()->begin(); >+ size_t commitSize = physicalPageSizeSloppyRoundUp(firstPageBegin, pagesToCommit * pageSize); >+ BASSERT(commitSize); >+ m_footprint += commitSize; >+ >+ vmAllocatePhysicalPagesSloppy(firstPageBegin, commitSize); >+ >+ size_t firstPageOffset = chunk->offset(firstPageBegin); >+ size_t lastPageOffset = firstPageOffset + pagesToCommit * pageSize; >+ >+ Object begin(chunk, firstPageOffset); >+ Object end(chunk, lastPageOffset); >+ >+ for (auto it = begin; it + pageSize <= end; it = it + pageSize) >+ it.page()->setHasPhysicalPages(true); >+#if ENABLE_PHYSICAL_PAGE_MAP >+ m_physicalPageMap.commit(firstPageToCommit, commitSize); >+#endif >+} >+ > void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass) > { > m_objectTypes.set(chunk, ObjectType::Large); >@@ -325,12 +425,7 @@ SmallPage* Heap::allocateSmallPage(std:: > m_freeableMemory -= physicalSize; > else { > m_scavenger->scheduleIfUnderMemoryPressure(pageSize); >- m_footprint += physicalSize; >- vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize); >- page->setHasPhysicalPages(true); >-#if ENABLE_PHYSICAL_PAGE_MAP >- m_physicalPageMap.commit(page->begin()->begin(), pageSize); >-#endif >+ commitSmallPage(lock, page, pageSize); > } > > return page; >@@ -390,7 +485,7 @@ void Heap::allocateSmallBumpRangesByMeta > SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache); > SmallLine* lines = page->begin(); > BASSERT(page->hasFreeLines(lock)); >- size_t smallLineCount = 
m_vmPageSizePhysical / smallLineSize; >+ size_t smallLineCount = smallPageSize / smallLineSize; > LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount]; > > auto findSmallBumpRange = [&](size_t& lineNumber) { >Index: Source/bmalloc/bmalloc/Heap.h >=================================================================== >--- Source/bmalloc/bmalloc/Heap.h (revision 240939) >+++ Source/bmalloc/bmalloc/Heap.h (working copy) >@@ -120,6 +120,9 @@ private: > SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&); > void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&); > >+ void tryDecommitSmallPage(std::lock_guard<Mutex>&, BulkDecommit& decommitter, SmallPage*, size_t pageSize); >+ void commitSmallPage(std::unique_lock<Mutex>&, SmallPage*, size_t pageSize); >+ > void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass); > void deallocateSmallChunk(Chunk*, size_t pageClass); > >Index: Source/bmalloc/bmalloc/Sizes.h >=================================================================== >--- Source/bmalloc/bmalloc/Sizes.h (revision 240939) >+++ Source/bmalloc/bmalloc/Sizes.h (working copy) >@@ -51,7 +51,7 @@ static constexpr size_t chunkSize = 1 * > static constexpr size_t chunkMask = ~(chunkSize - 1ul); > > static constexpr size_t smallLineSize = 256; >-static constexpr size_t smallPageSize = 4 * kB; >+static constexpr size_t smallPageSize = 2 * kB; > static constexpr size_t smallPageLineCount = smallPageSize / smallLineSize; > > static constexpr size_t maskSizeClassMax = 512; >Index: Source/bmalloc/bmalloc/VMAllocate.h >=================================================================== >--- Source/bmalloc/bmalloc/VMAllocate.h (revision 240939) >+++ Source/bmalloc/bmalloc/VMAllocate.h (working copy) >@@ -234,6 +234,16 @@ inline size_t physicalPageSizeSloppy(voi > return end - begin; > } > >+inline size_t physicalPageSizeSloppyRoundUp(void* p, size_t size) >+{ >+ char* begin = 
roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p)); >+ char* end = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size); >+ >+ if (begin >= end) >+ return 0; >+ return end - begin; >+} >+ > // Trims requests that are un-page-aligned. > inline void vmDeallocatePhysicalPagesSloppy(void* p, size_t size) > {
You cannot view the attachment while viewing its details because your browser does not support IFRAMEs.
View the attachment on a separate page
.
View Attachment As Diff
View Attachment As Raw
Actions:
View
|
Formatted Diff
|
Diff
Attachments on
bug 192389
:
356564
|
357062
|
361127
|
361133
|
361315
|
361348
|
361362
|
361436