WebKit Bugzilla
Attachment 356564 Details for Bug 192389: bmalloc uses more memory on iOS compared to macOS due to physical page size differences
Description: Work in progress patch
Filename: 192389-wip1.patch
MIME Type: text/plain
Creator: Michael Saboff
Created: 2018-12-04 18:08:18 PST
Size: 12.66 KB
Flags: patch, obsolete
Index: Source/bmalloc/ChangeLog
===================================================================
--- Source/bmalloc/ChangeLog	(revision 238882)
+++ Source/bmalloc/ChangeLog	(working copy)
@@ -1,3 +1,34 @@
+2018-12-04  Michael Saboff  <msaboff@apple.com>
+
+        bmalloc uses more memory on iOS compared to macOS due to physical page size differences
+        https://bugs.webkit.org/show_bug.cgi?id=192389
+
+        Reviewed by NOBODY (OOPS!).
+
+        Changed small line allocations to be in smallPageSize "virtual page" increments instead of
+        physical page size increments. This required changing the small page commit / decommit code
+        to work in full physical page increments. For page classes at the physical page size and larger,
+        there wasn't any change. When scavenging page classes smaller than the physical page size,
+        we need to consider whether or not the adjacent small pages on the same physical page are
+        also free before decommitting that containing page. When we need to commit more memory,
+        we commit the whole physical page, and mark any adjacent virtual pages that were fully committed as well.
+
+        * bmalloc/Chunk.h:
+        (bmalloc::forEachPage):
+        * bmalloc/Heap.cpp:
+        (bmalloc::Heap::initializeLineMetadata):
+        (bmalloc::Heap::initializePageMetadata):
+        (bmalloc::Heap::scavenge):
+        (bmalloc::Heap::findSmallPageRangeSharingPhysicalPages):
+        (bmalloc::Heap::tryDecommitSmallPage):
+        (bmalloc::Heap::commitSmallPage):
+        (bmalloc::Heap::allocateSmallPage):
+        (bmalloc::Heap::allocateSmallBumpRangesByMetadata):
+        * bmalloc/Heap.h:
+        * bmalloc/VMAllocate.h:
+        (bmalloc::vmDeallocatePhysicalPages):
+        (bmalloc::physicalPageSizeSloppyRoundUp):
+
 2018-11-21  Dominik Infuehr  <dinfuehr@igalia.com>
 
         Enable JIT on ARM/Linux
Index: Source/bmalloc/bmalloc/Chunk.h
===================================================================
--- Source/bmalloc/bmalloc/Chunk.h	(revision 238835)
+++ Source/bmalloc/bmalloc/Chunk.h	(working copy)
@@ -77,7 +77,8 @@ template<typename Function> void forEach
 {
     // We align to at least the page size so we can service aligned allocations
     // at equal and smaller powers of two, and also so we can vmDeallocatePhysicalPages().
-    size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(pageSize, sizeof(Chunk));
+    size_t firstPageOffset = max(pageSize, vmPageSize());
+    size_t metadataSize = roundUpToMultipleOfNonPowerOfTwo(firstPageOffset, sizeof(Chunk));
 
     Object begin(chunk, metadataSize);
     Object end(chunk, chunkSize);
Index: Source/bmalloc/bmalloc/Heap.cpp
===================================================================
--- Source/bmalloc/bmalloc/Heap.cpp	(revision 238835)
+++ Source/bmalloc/bmalloc/Heap.cpp	(working copy)
@@ -87,7 +87,7 @@ size_t Heap::gigacageSize()
 void Heap::initializeLineMetadata()
 {
     size_t sizeClassCount = bmalloc::sizeClass(smallLineSize);
-    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
+    size_t smallLineCount = smallPageSize / smallLineSize;
     m_smallLineMetadata.grow(sizeClassCount * smallLineCount);
 
     for (size_t sizeClass = 0; sizeClass < sizeClassCount; ++sizeClass) {
@@ -96,7 +96,7 @@ void Heap::initializeLineMetadata()
 
         size_t object = 0;
         size_t line = 0;
-        while (object < m_vmPageSizePhysical) {
+        while (object < smallPageSize) {
             line = object / smallLineSize;
             size_t leftover = object % smallLineSize;
 
@@ -110,7 +110,7 @@ void Heap::initializeLineMetadata()
         }
 
        // Don't allow the last object in a page to escape the page.
-        if (object > m_vmPageSizePhysical) {
+        if (object > smallPageSize) {
             BASSERT(pageMetadata[line].objectCount);
             --pageMetadata[line].objectCount;
         }
@@ -122,11 +122,11 @@ void Heap::initializePageMetadata()
     auto computePageSize = [&](size_t sizeClass) {
         size_t size = objectSize(sizeClass);
         if (sizeClass < bmalloc::sizeClass(smallLineSize))
-            return m_vmPageSizePhysical;
+            return smallPageSize;
 
-        for (size_t pageSize = m_vmPageSizePhysical;
+        for (size_t pageSize = smallPageSize;
             pageSize < pageSizeMax;
-            pageSize += m_vmPageSizePhysical) {
+            pageSize += smallPageSize) {
             RELEASE_BASSERT(pageSize <= chunkSize / 2);
             size_t waste = pageSize % size;
             if (waste <= pageSize / pageSizeWasteFactor)
@@ -182,14 +182,7 @@ void Heap::scavenge(std::lock_guard<Mute
                 continue;
 
             size_t pageSize = bmalloc::pageSize(&list - &m_freePages[0]);
-            size_t decommitSize = physicalPageSizeSloppy(page->begin()->begin(), pageSize);
-            m_freeableMemory -= decommitSize;
-            m_footprint -= decommitSize;
-            decommitter.addEager(page->begin()->begin(), pageSize);
-            page->setHasPhysicalPages(false);
-#if ENABLE_PHYSICAL_PAGE_MAP
-            m_physicalPageMap.decommit(page->begin()->begin(), pageSize);
-#endif
+            tryDecommitSmallPage(lock, decommitter, page, pageSize);
         }
     }
 }
@@ -261,6 +254,123 @@ void Heap::allocateSmallChunk(std::uniqu
     m_freePages[pageClass].push(chunk);
 }
 
+template<typename Lock, typename Function>
+struct Heap::SmallPageRange Heap::findSmallPageRangeSharingPhysicalPages(Lock&, SmallPage* page, size_t pageSize, Function includeSmallPageAt)
+{
+    struct SmallPageRange result { nullptr, 0 };
+
+    if (pageSize >= m_vmPageSizePhysical) {
+        result.first = page;
+        result.count = 1;
+
+        return result;
+    }
+
+    char* pageBegin = page->begin()->begin();
+    char* physicalPagesBegin = roundDownToMultipleOf(m_vmPageSizePhysical, pageBegin);
+    char* physicalPagesEnd = roundUpToMultipleOf(m_vmPageSizePhysical, pageBegin + pageSize);
+    char* tempPageBegin = pageBegin;
+    char* firstIncludedPageBegin = nullptr;
+
+    for (unsigned maxSmallPages = 32; tempPageBegin >= physicalPagesBegin && maxSmallPages--; tempPageBegin -= pageSize) {
+        if (!includeSmallPageAt(tempPageBegin))
+            break;
+
+        firstIncludedPageBegin = tempPageBegin;
+        result.count++;
+    }
+
+    if (!firstIncludedPageBegin)
+        return result;
+
+    Chunk* chunk = Chunk::get(firstIncludedPageBegin);
+    result.first = chunk->page(chunk->offset(firstIncludedPageBegin));
+    tempPageBegin = pageBegin;
+
+    if (tempPageBegin + pageSize < physicalPagesEnd) {
+        tempPageBegin += pageSize;
+
+        for (unsigned maxSmallPages = 32; maxSmallPages--; tempPageBegin += pageSize) {
+            if (!includeSmallPageAt(tempPageBegin))
+                break;
+
+            result.count++;
+
+            if (tempPageBegin + pageSize >= physicalPagesEnd)
+                break;
+        }
+    }
+
+    return result;
+}
+
+void Heap::tryDecommitSmallPage(std::lock_guard<Mutex>& lock, BulkDecommit& decommitter, SmallPage* smallPage, size_t pageSize)
+{
+    struct Heap::SmallPageRange pageRange = findSmallPageRangeSharingPhysicalPages(lock, smallPage, pageSize, [&](char* smallPageBegin) {
+        Chunk* chunk = Chunk::get(smallPageBegin);
+        SmallPage* pageToCheck = chunk->page(chunk->offset(smallPageBegin));
+
+        for (auto* page : chunk->freePages()) {
+            if (pageToCheck == page)
+                return pageToCheck->hasPhysicalPages();
+        }
+
+        return false;
+    });
+
+    if (!pageRange.first || !pageRange.count)
+        return;
+
+    char* firstPageBegin = pageRange.first->begin()->begin();
+    size_t decommitSize = physicalPageSizeSloppy(firstPageBegin, pageRange.count * pageSize);
+    char* decommitEnd = firstPageBegin + decommitSize;
+    m_freeableMemory -= decommitSize;
+    m_footprint -= decommitSize;
+    decommitter.addEager(firstPageBegin, decommitSize);
+    Chunk* chunk = Chunk::get(firstPageBegin);
+    for (unsigned i = 0; i < pageRange.count; i++) {
+        char* pageBegin = firstPageBegin + (i * pageSize);
+        if (pageBegin >= firstPageBegin && pageBegin + pageSize <= decommitEnd) {
+            SmallPage* pageToDecommit = chunk->page(chunk->offset(pageBegin));
+            pageToDecommit->setHasPhysicalPages(false);
+        }
+    }
+#if ENABLE_PHYSICAL_PAGE_MAP
+    m_physicalPageMap.decommit(firstPageBegin, decommitSize);
+#endif
+}
+
+void Heap::commitSmallPage(std::unique_lock<Mutex>& lock, SmallPage* page, size_t pageSize)
+{
+    struct Heap::SmallPageRange pageRange = findSmallPageRangeSharingPhysicalPages(lock, page, pageSize, [&](char* smallPageBegin) {
+        Chunk* chunk = Chunk::get(smallPageBegin);
+        SmallPage* pageToCheck = chunk->page(chunk->offset(smallPageBegin));
+        return !pageToCheck->hasPhysicalPages();
+    });
+
+    BASSERT(pageRange.first && pageRange.count);
+
+    char* firstPageBegin = pageRange.first->begin()->begin();
+    size_t commitSize = physicalPageSizeSloppyRoundUp(firstPageBegin, pageRange.count * pageSize);
+    BASSERT(commitSize);
+    char* commitEnd = firstPageBegin + commitSize;
+    m_footprint += commitSize;
+
+    Chunk* chunk = Chunk::get(firstPageBegin);
+    BASSERT(chunk == Chunk::get(page->begin()->begin()));
+    vmAllocatePhysicalPagesSloppy(firstPageBegin, commitSize);
+    for (unsigned i = 0; i < pageRange.count; i++) {
+        char* pageBegin = firstPageBegin + (i * pageSize);
+        if (pageBegin >= firstPageBegin && pageBegin + pageSize <= commitEnd) {
+            SmallPage* pageToCommit = chunk->page(chunk->offset(pageBegin));
+            pageToCommit->setHasPhysicalPages(true);
+        }
+    }
+#if ENABLE_PHYSICAL_PAGE_MAP
+    m_physicalPageMap.commit(firstPageBegin, commitSize);
+#endif
+}
+
 void Heap::deallocateSmallChunk(Chunk* chunk, size_t pageClass)
 {
     m_objectTypes.set(chunk, ObjectType::Large);
@@ -319,12 +429,7 @@ SmallPage* Heap::allocateSmallPage(std::
         m_freeableMemory -= physicalSize;
     else {
         m_scavenger->scheduleIfUnderMemoryPressure(pageSize);
-        m_footprint += physicalSize;
-        vmAllocatePhysicalPagesSloppy(page->begin()->begin(), pageSize);
-        page->setHasPhysicalPages(true);
-#if ENABLE_PHYSICAL_PAGE_MAP
-        m_physicalPageMap.commit(page->begin()->begin(), pageSize);
-#endif
+        commitSmallPage(lock, page, pageSize);
     }
 
     return page;
@@ -384,7 +489,7 @@ void Heap::allocateSmallBumpRangesByMeta
     SmallPage* page = allocateSmallPage(lock, sizeClass, lineCache);
     SmallLine* lines = page->begin();
     BASSERT(page->hasFreeLines(lock));
-    size_t smallLineCount = m_vmPageSizePhysical / smallLineSize;
+    size_t smallLineCount = smallPageSize / smallLineSize;
     LineMetadata* pageMetadata = &m_smallLineMetadata[sizeClass * smallLineCount];
 
     auto findSmallBumpRange = [&](size_t& lineNumber) {
Index: Source/bmalloc/bmalloc/Heap.h
===================================================================
--- Source/bmalloc/bmalloc/Heap.h	(revision 238835)
+++ Source/bmalloc/bmalloc/Heap.h	(working copy)
@@ -103,6 +103,11 @@ private:
         }
     };
 
+    struct SmallPageRange {
+        SmallPage* first;
+        size_t count;
+    };
+
     ~Heap() = delete;
 
     bool usingGigacage();
@@ -120,6 +125,11 @@ private:
     SmallPage* allocateSmallPage(std::unique_lock<Mutex>&, size_t sizeClass, LineCache&);
     void deallocateSmallLine(std::unique_lock<Mutex>&, Object, LineCache&);
 
+    template<typename Lock, typename Function>
+    struct SmallPageRange findSmallPageRangeSharingPhysicalPages(Lock&, SmallPage*, size_t pageSize, Function includeSmallPage);
+    void tryDecommitSmallPage(std::lock_guard<Mutex>&, BulkDecommit& decommitter, SmallPage*, size_t pageSize);
+    void commitSmallPage(std::unique_lock<Mutex>&, SmallPage*, size_t pageSize);
+
     void allocateSmallChunk(std::unique_lock<Mutex>&, size_t pageClass);
     void deallocateSmallChunk(Chunk*, size_t pageClass);
 
Index: Source/bmalloc/bmalloc/VMAllocate.h
===================================================================
--- Source/bmalloc/bmalloc/VMAllocate.h	(revision 238835)
+++ Source/bmalloc/bmalloc/VMAllocate.h	(working copy)
@@ -235,6 +235,16 @@ inline size_t physicalPageSizeSloppy(voi
     return end - begin;
 }
 
+inline size_t physicalPageSizeSloppyRoundUp(void* p, size_t size)
+{
+    char* begin = roundDownToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p));
+    char* end = roundUpToMultipleOf(vmPageSizePhysical(), static_cast<char*>(p) + size);
+
+    if (begin >= end)
+        return 0;
+    return end - begin;
+}
+
 // Trims requests that are un-page-aligned.
 inline void vmDeallocatePhysicalPagesSloppy(void* p, size_t size)
 {
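Two rounding policies in VMAllocate.h do the heavy lifting above: the existing physicalPageSizeSloppy() appears to round a range inward to physical page boundaries (safe for decommit, which must never touch a physical page the range only partially covers), while the new physicalPageSizeSloppyRoundUp() rounds outward (needed for commit, which must back every physical page the range overlaps). Below is a minimal standalone sketch of that distinction, assuming 16 KB physical pages (as on recent iOS hardware) and 4 KB bmalloc small pages; the helper names and constants here are illustrative, not bmalloc's actual code.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Assumed sizes, for illustration only.
constexpr uintptr_t physicalPageSize = 16 * 1024; // e.g. arm64 iOS hardware page
constexpr uintptr_t smallPageSize = 4 * 1024;     // bmalloc's small "virtual" page

uintptr_t roundDown(uintptr_t x, uintptr_t m) { return x - x % m; }
uintptr_t roundUp(uintptr_t x, uintptr_t m) { return roundDown(x + m - 1, m); }

// Inward rounding, as physicalPageSizeSloppy() appears to do: size of the
// physical pages *fully contained* in [p, p + size); may be zero.
size_t sizeSloppy(uintptr_t p, size_t size)
{
    uintptr_t begin = roundUp(p, physicalPageSize);
    uintptr_t end = roundDown(p + size, physicalPageSize);
    return begin < end ? end - begin : 0;
}

// Outward rounding, matching the new physicalPageSizeSloppyRoundUp(): size of
// the physical pages *overlapped* by [p, p + size).
size_t sizeSloppyRoundUp(uintptr_t p, size_t size)
{
    uintptr_t begin = roundDown(p, physicalPageSize);
    uintptr_t end = roundUp(p + size, physicalPageSize);
    return begin < end ? end - begin : 0;
}

int main()
{
    // A single free 4 KB small page in the middle of a 16 KB physical page:
    // nothing can be decommitted, yet committing it pays for the full 16 KB.
    uintptr_t p = 0x10000 + smallPageSize;
    printf("decommittable: %zu\n", sizeSloppy(p, smallPageSize));        // 0
    printf("committed:     %zu\n", sizeSloppyRoundUp(p, smallPageSize)); // 16384
    return 0;
}

With those sizes, a lone free 4 KB small page can never be decommitted on its own, which is exactly why the scavenger now has to examine its neighbors first.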
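The core of the patch is the neighbor scan in findSmallPageRangeSharingPhysicalPages(): starting from the small page in hand, it walks downward and then upward within the containing physical page span, growing the run of small pages that satisfy the caller's predicate. Here is a simplified index-based sketch of that scan; the toy free map stands in for the includeSmallPageAt() predicate and is an illustration of the idea, not bmalloc's data structures.

#include <cstdio>

int main()
{
    const int smallPagesPerPhysicalPage = 4; // e.g. 16 KB physical / 4 KB small
    // Toy free map for the small pages sharing one physical page.
    bool isFree[smallPagesPerPhysicalPage] = { true, true, false, true };
    int target = 1; // index of the small page we want to decommit

    // Scan left from the target, then right, growing the contiguous free run,
    // just as the patch walks pageBegin down toward physicalPagesBegin and up
    // toward physicalPagesEnd.
    int first = target;
    int count = 0;
    for (int i = target; i >= 0 && isFree[i]; --i, ++count)
        first = i;
    for (int i = target + 1; i < smallPagesPerPhysicalPage && isFree[i]; ++i)
        ++count;

    // Within a single physical page, decommit is only possible when the free
    // run covers all of it; here pages 0-1 are free but page 2 is live.
    printf("run: first=%d count=%d -> %s\n", first, count,
        count == smallPagesPerPhysicalPage ? "decommit" : "keep committed");
    return 0;
}

tryDecommitSmallPage() then shrinks the resulting run inward with physicalPageSizeSloppy() and decommits only the physical pages the run fully covers, while commitSmallPage() rounds outward with physicalPageSizeSloppyRoundUp() and marks every small page it fully committed.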