if (PageHighMem(page)) {
void *ptr = kmap_atomic(page);
- dmac_flush_range(ptr, ptr + PAGE_SIZE);
+// dmac_flush_range(ptr, ptr + PAGE_SIZE);
kunmap_atomic(ptr);
} else {
void *ptr = page_address(page);
- dmac_flush_range(ptr, ptr + PAGE_SIZE);
+// dmac_flush_range(ptr, ptr + PAGE_SIZE);
}
}
*/
extern void wait_on_page_bit(struct page *page, int bit_nr);
+extern void wait_on_page_bit_timeout(struct page *page, int bit_nr);
+
extern int wait_on_page_bit_killable(struct page *page, int bit_nr);
extern int wait_on_page_bit_killable_timeout(struct page *page,
int bit_nr, unsigned long timeout);
wait_on_page_bit(page, PG_locked);
}
+static inline void wait_on_page_locked_timeout(struct page *page)
+{
+ if (PageLocked(page))
+ wait_on_page_bit_timeout(page, PG_locked);
+}
+
/*
* Wait for a page to complete writeback
*/
}
EXPORT_SYMBOL(delete_from_page_cache);
+static int sleep_on_page_timeout(struct wait_bit_key *word)
+{
+ return io_schedule_timeout(2) ? 0 : -EAGAIN;
+}
+
static int filemap_check_errors(struct address_space *mapping)
{
int ret = 0;
}
EXPORT_SYMBOL(wait_on_page_bit);
+void wait_on_page_bit_timeout(struct page *page, int bit_nr)
+{
+ DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
+
+ if (test_bit(bit_nr, &page->flags))
+ __wait_on_bit(page_waitqueue(page), &wait,
+ sleep_on_page_timeout, TASK_UNINTERRUPTIBLE);
+}
+EXPORT_SYMBOL(wait_on_page_bit_timeout);
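
A minimal caller sketch (hypothetical, not part of the patch), assuming only the helpers added above: __wait_on_bit() keeps sleeping while the action callback returns 0 and gives up once it returns non-zero, so sleep_on_page_timeout() turning an expired io_schedule_timeout(2) into -EAGAIN means wait_on_page_bit_timeout() can return with the bit still set, and callers must be prepared to retry.

	/* hypothetical caller, for illustration only */
	static void poll_until_unlocked(struct page *page)
	{
		/*
		 * The timed wait may return after ~2 jiffies with PG_locked
		 * still set, so loop until the lock has really gone away.
		 */
		while (PageLocked(page))
			wait_on_page_locked_timeout(page);
	}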
+
int wait_on_page_bit_killable(struct page *page, int bit_nr)
{
DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
*/
get_page_foll(newpage);
- if (migrate_replace_page(page, newpage) == 0)
+ if (migrate_replace_page(page, newpage) == 0) {
+ put_page(newpage);
return newpage;
+ }
put_page(newpage);
__free_page(newpage);
struct page *page;
unsigned int foll_flags = gup_flags;
unsigned int page_increm;
+ static DEFINE_MUTEX(s_follow_page_lock);
/* first iteration or cross vma bound */
if (!vma || start >= vma->vm_end) {
if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
cond_resched();
+ mutex_lock(&s_follow_page_lock);
page = follow_page_mask(vma, start, foll_flags, &page_mask);
if (!page) {
int ret;
ret = faultin_page(tsk, vma, start, &foll_flags,
nonblocking);
+ mutex_unlock(&s_follow_page_lock);
switch (ret) {
case 0:
goto retry;
return i ? i : PTR_ERR(page);
}
- if ((gup_flags & FOLL_DURABLE) && is_cma_page(page))
+ if (is_cma_page(page) && (foll_flags & FOLL_GET)) {
+ struct page *old_page = page;
+ unsigned int fault_flags = 0;
+
+ put_page(page);
+ wait_on_page_locked_timeout(page);
page = migrate_replace_cma_page(page);
+ /* Migration might have succeeded. The vma mapping
+ * might also have changed if another access took a
+ * write fault before the migration code locked the
+ * page. Follow the page again to get the latest
+ * mapping: if migration succeeded, the retry finds
+ * a non-CMA page; if there was a write fault,
+ * follow_page and the CMA page replacement (if
+ * still needed) restart with the new page.
+ */
+ if (page == old_page)
+ wait_on_page_locked_timeout(page);
+ if (foll_flags & FOLL_WRITE) {
+ /* The page would be marked old during migration.
+ * Call handle_mm_fault with FAULT_FLAG_WRITE to
+ * make it young again; this avoids sanity-check
+ * failures in the calling code, which checks the
+ * pte write-permission bits.
+ */
+ fault_flags |= FAULT_FLAG_WRITE;
+ handle_mm_fault(mm, vma,
+ start, fault_flags);
+ }
+ foll_flags = gup_flags;
+ mutex_unlock(&s_follow_page_lock);
+ goto retry;
+ }
+
+ mutex_unlock(&s_follow_page_lock);
+ BUG_ON(is_cma_page(page) && (foll_flags & FOLL_GET));
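
The retry above relies on a specific return contract from migrate_replace_cma_page(): a non-CMA page on success, or the original page unchanged on failure, so that "page == old_page" means migration did not happen and the caller only waits briefly and follows the page again. A hypothetical stub showing just that contract, mirroring the earlier migrate_replace_page() hunk (the name and GFP flag are illustrative, not the real implementation):

	/* hypothetical stub, illustrating the assumed return contract only */
	static struct page *replace_cma_page_contract(struct page *page)
	{
		struct page *newpage = alloc_page(GFP_HIGHUSER);

		if (!newpage)
			return page;		/* failure: caller sees old_page */
		if (migrate_replace_page(page, newpage) == 0)
			return newpage;		/* success: retry finds a non-CMA page */
		__free_page(newpage);
		return page;			/* failure: caller sees old_page */
	}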
if (pages) {
pages[i] = page;
page = migration_entry_to_page(entry);
- /*
- * Once radix-tree replacement of page migration started, page_count
- * *must* be zero. And, we don't want to call wait_on_page_locked()
- * against a page without get_page().
- * So, we use get_page_unless_zero(), here. Even failed, page fault
- * will occur again.
- */
- if (!get_page_unless_zero(page))
- goto out;
- pte_unmap_unlock(ptep, ptl);
- wait_on_page_locked(page);
- put_page(page);
+ if (is_cma_page(page)) {
+ pte_unmap_unlock(ptep, ptl);
+ /* Don't take a reference on the page, as that can
+ * cause the migration to be aborted midway; migration
+ * proceeds once it has locked the page, so just wait
+ * for the page to be unlocked. If the page got
+ * unlocked, reallocated and locked again (possibly
+ * forever) before this call, the wait times out on
+ * the next tick and exits.
+ */
+ wait_on_page_locked_timeout(page);
+ } else {
+ /*
+ * Once radix-tree replacement of page migration started, page_count
+ * *must* be zero. And, we don't want to call wait_on_page_locked()
+ * against a page without get_page().
+ * So, we use get_page_unless_zero(), here. Even failed, page fault
+ * will occur again.
+ */
+ if (!get_page_unless_zero(page))
+ goto out;
+ pte_unmap_unlock(ptep, ptl);
+ wait_on_page_locked(page);
+ put_page(page);
+ }
return;
out:
pte_unmap_unlock(ptep, ptl);
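
The "don't take a reference" rule above works because the migration core gives up when the page's refcount is higher than what the mapping alone accounts for. A simplified recap of that upstream behaviour (illustration only, not code from this patch; the helper name is made up):

	/* why an extra get_page() here would make the CMA migration fail */
	static bool extra_ref_aborts_migration(struct page *page, int expected_count)
	{
		/*
		 * migrate_page_move_mapping() bails out with -EAGAIN when
		 * page_count() does not match the count it expects from the
		 * mapping, so pinning the page in migration_entry_wait()
		 * would keep making the migration attempt fail.
		 */
		return page_count(page) != expected_count;
	}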
return -EAGAIN;
}
- /* page is now isolated, so release additional reference */
- put_page(page);
-
for (pass = 0; pass < 10 && ret != 0; pass++) {
cond_resched();