From: Larry Woodman <lwoodman@redhat.com> Date: Mon, 14 Jan 2008 11:57:00 -0500 Subject: [mm] prevent cpu lockups in invalidate_mapping_pages Message-id: 1200329820.12098.7.camel@dhcp83-56.boston.redhat.com O-Subject: [RHEL5-U2 patch] invalidate_mapping_pages() can take long enough to incur cpu lockup messages. Bugzilla: 427798 invalidate_mapping_pages() loops through every page between start and end without rescheduling. This can cause cpu lockups to occur if there are enough pages when called from the drop_caches code. The attached upstream patch calls cond_resched() after each pvec is processed to prevent the cpu lockup. Fixes RHEL5-U2 blocker BZ427798 Acked-by: Rik van Riel <riel@redhat.com> diff --git a/fs/drop_caches.c b/fs/drop_caches.c index 4e47623..59375ef 100644 --- a/fs/drop_caches.c +++ b/fs/drop_caches.c @@ -20,7 +20,7 @@ static void drop_pagecache_sb(struct super_block *sb) list_for_each_entry(inode, &sb->s_inodes, i_sb_list) { if (inode->i_state & (I_FREEING|I_WILL_FREE)) continue; - invalidate_inode_pages(inode->i_mapping); + __invalidate_mapping_pages(inode->i_mapping, 0, -1, true); } spin_unlock(&inode_lock); } diff --git a/include/linux/fs.h b/include/linux/fs.h index 7952faf..3423d23 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1561,6 +1561,9 @@ extern int check_disk_change(struct block_device *); extern int invalidate_inodes(struct super_block *); extern int __invalidate_device(struct block_device *); extern int invalidate_partition(struct gendisk *, int); +unsigned long __invalidate_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t end, + bool be_atomic); unsigned long invalidate_mapping_pages(struct address_space *mapping, pgoff_t start, pgoff_t end); unsigned long invalidate_inode_pages(struct address_space *mapping); diff --git a/mm/truncate.c b/mm/truncate.c index 35ee138..2599770 100644 --- a/mm/truncate.c +++ b/mm/truncate.c @@ -208,21 +208,8 @@ void truncate_inode_pages(struct address_space 
*mapping, loff_t lstart) } EXPORT_SYMBOL(truncate_inode_pages); -/** - * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode - * @mapping: the address_space which holds the pages to invalidate - * @start: the offset 'from' which to invalidate - * @end: the offset 'to' which to invalidate (inclusive) - * - * This function only removes the unlocked pages, if you want to - * remove all the pages of one inode, you must call truncate_inode_pages. - * - * invalidate_mapping_pages() will not block on IO activity. It will not - * invalidate pages which are dirty, locked, under writeback or mapped into - * pagetables. - */ -unsigned long invalidate_mapping_pages(struct address_space *mapping, - pgoff_t start, pgoff_t end) +unsigned long __invalidate_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t end, bool be_atomic) { struct pagevec pvec; pgoff_t next = start; @@ -263,10 +250,31 @@ unlock: break; } pagevec_release(&pvec); + if (likely(!be_atomic)) + cond_resched(); } return ret; } + /** + * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode + * @mapping: the address_space which holds the pages to invalidate + * @start: the offset 'from' which to invalidate + * @end: the offset 'to' which to invalidate (inclusive) + * + * This function only removes the unlocked pages, if you want to + * remove all the pages of one inode, you must call truncate_inode_pages. + * + * invalidate_mapping_pages() will not block on IO activity. It will not + * invalidate pages which are dirty, locked, under writeback or mapped into + * pagetables. + */ +unsigned long invalidate_mapping_pages(struct address_space *mapping, + pgoff_t start, pgoff_t end) +{ + return __invalidate_mapping_pages(mapping, start, end, false); +} + EXPORT_SYMBOL_GPL(invalidate_mapping_pages); unsigned long invalidate_inode_pages(struct address_space *mapping)