/*
 * mm/page-writeback.c
 *
 * Copyright (C) 2002, Linus Torvalds.
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <[email protected]>
 *
 * Contains functions related to writing back dirty pages at the
 * address_space level.
 *
 * 10Apr2002	Andrew Morton
 *		Initial version
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/init.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/blkdev.h>
#include <linux/mpage.h>
#include <linux/rmap.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <trace/events/writeback.h>

/*
 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
 * will look to see if it needs to force writeback or throttling.
 */
static long ratelimit_pages = 32;

/*
 * When balance_dirty_pages decides that the caller needs to perform some
 * non-background writeback, this is how many pages it will attempt to write.
 * It should be somewhat larger than dirtied pages to ensure that reasonably
 * large amounts of I/O are submitted.
 */
static inline long sync_writeback_pages(unsigned long dirtied)
{
	if (dirtied < ratelimit_pages)
		dirtied = ratelimit_pages;

	return dirtied + dirtied / 2;
}

/* The following parameters are exported via /proc/sys/vm */

/*
 * Start background writeback (via writeback threads) at this percentage
 */
int dirty_background_ratio = 10;

/*
 * dirty_background_bytes starts at 0 (disabled) so that it is a function of
 * dirty_background_ratio * the amount of dirtyable memory
 */
unsigned long dirty_background_bytes;

/*
 * free highmem will not be subtracted from the total free memory
 * for calculating free ratios if vm_highmem_is_dirtyable is true
 */
int vm_highmem_is_dirtyable;

/*
 * The generator of dirty data starts writeback at this percentage
 */
int vm_dirty_ratio = 20;

/*
 * vm_dirty_bytes starts at 0 (disabled) so that it is a function of
 * vm_dirty_ratio * the amount of dirtyable memory
 */
unsigned long vm_dirty_bytes;

/*
 * The interval between `kupdate'-style writebacks
 */
unsigned int dirty_writeback_interval = 5 * 100; /* centiseconds */

/*
 * The longest time for which data is allowed to remain dirty
 */
unsigned int dirty_expire_interval = 30 * 100; /* centiseconds */

/*
 * Flag that makes the machine dump writes/reads and block dirtyings.
 */
int block_dump;

/*
 * Flag that puts the machine in "laptop mode". Doubles as a timeout in jiffies:
 * a full sync is triggered after this time elapses without any disk activity.
 */
int laptop_mode;

EXPORT_SYMBOL(laptop_mode);

/* End of sysctl-exported parameters */


/*
 * Scale the writeback cache size proportional to the relative writeout speeds.
 *
 * We do this by keeping a floating proportion between BDIs, based on page
 * writeback completions [end_page_writeback()]. Those devices that write out
 * pages fastest will get the larger share, while the slower will get a smaller
 * share.
 *
 * We use page writeout completions because we are interested in getting rid of
 * dirty pages. Having them written out is the primary goal.
 *
 * We introduce a concept of time, a period over which we measure these events,
 * because demand can/will vary over time. The length of this period itself is
 * measured in page writeback completions.
 *
 */
static struct prop_descriptor vm_completions;
static struct prop_descriptor vm_dirties;

/*
 * couple the period to the dirty_ratio:
 *
 *   period/2 ~ roundup_pow_of_two(dirty limit)
 */
static int calc_period_shift(void)
{
	unsigned long dirty_total;

	if (vm_dirty_bytes)
		dirty_total = vm_dirty_bytes / PAGE_SIZE;
	else
		dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) /
				100;
	return 2 + ilog2(dirty_total - 1);
}

/*
 * update the period when the dirty threshold changes.
 */
static void update_completion_period(void)
{
	int shift = calc_period_shift();
	prop_change_shift(&vm_completions, shift);
	prop_change_shift(&vm_dirties, shift);
}

int dirty_background_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_bytes = 0;
	return ret;
}

int dirty_background_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write)
		dirty_background_ratio = 0;
	return ret;
}

int dirty_ratio_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int old_ratio = vm_dirty_ratio;
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
		update_completion_period();
		vm_dirty_bytes = 0;
	}
	return ret;
}


int dirty_bytes_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	unsigned long old_bytes = vm_dirty_bytes;
	int ret;

	ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
	if (ret == 0 && write && vm_dirty_bytes != old_bytes) {
		update_completion_period();
		vm_dirty_ratio = 0;
	}
	return ret;
}

/*
 * Increment the BDI's writeout completion count and the global writeout
 * completion count. Called from test_clear_page_writeback().
 */
static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
{
	__prop_inc_percpu_max(&vm_completions, &bdi->completions,
			      bdi->max_prop_frac);
}

void bdi_writeout_inc(struct backing_dev_info *bdi)
{
	unsigned long flags;

	local_irq_save(flags);
	__bdi_writeout_inc(bdi);
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(bdi_writeout_inc);

void task_dirty_inc(struct task_struct *tsk)
{
	prop_inc_single(&vm_dirties, &tsk->dirties);
}

/*
 * Obtain an accurate fraction of the BDI's portion.
 */
static void bdi_writeout_fraction(struct backing_dev_info *bdi,
		long *numerator, long *denominator)
{
	if (bdi_cap_writeback_dirty(bdi)) {
		prop_fraction_percpu(&vm_completions, &bdi->completions,
				numerator, denominator);
	} else {
		*numerator = 0;
		*denominator = 1;
	}
}

static inline void task_dirties_fraction(struct task_struct *tsk,
		long *numerator, long *denominator)
{
	prop_fraction_single(&vm_dirties, &tsk->dirties,
				numerator, denominator);
}

/*
 * task_dirty_limit - scale down dirty throttling threshold for one task
 *
 * task specific dirty limit:
 *
 *   dirty -= (dirty/8) * p_{t}
 *
 * To protect light/slow dirtying tasks from heavier/fast ones, we start
 * throttling individual tasks before reaching the bdi dirty limit.
 * Relatively low thresholds will be allocated to heavy dirtiers. So when
 * dirty pages grow large, heavy dirtiers will be throttled first, which will
 * effectively curb the growth of dirty pages. Light dirtiers with high enough
 * dirty threshold may never get throttled.
 */
static unsigned long task_dirty_limit(struct task_struct *tsk,
				      unsigned long bdi_dirty)
{
	long numerator, denominator;
	unsigned long dirty = bdi_dirty;
	u64 inv = dirty >> 3;

	task_dirties_fraction(tsk, &numerator, &denominator);
	inv *= numerator;
	do_div(inv, denominator);

	dirty -= inv;

	return max(dirty, bdi_dirty/2);
}

/*
 *
 */
static unsigned int bdi_min_ratio;

int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
{
	int ret = 0;

	spin_lock_bh(&bdi_lock);
	if (min_ratio > bdi->max_ratio) {
		ret = -EINVAL;
	} else {
		min_ratio -= bdi->min_ratio;
		if (bdi_min_ratio + min_ratio < 100) {
			bdi_min_ratio += min_ratio;
			bdi->min_ratio += min_ratio;
		} else {
			ret = -EINVAL;
		}
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}

int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
{
	int ret = 0;

	if (max_ratio > 100)
		return -EINVAL;

	spin_lock_bh(&bdi_lock);
	if (bdi->min_ratio > max_ratio) {
		ret = -EINVAL;
	} else {
		bdi->max_ratio = max_ratio;
		bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
	}
	spin_unlock_bh(&bdi_lock);

	return ret;
}
EXPORT_SYMBOL(bdi_set_max_ratio);

/*
 * Work out the current dirty-memory clamping and background writeout
 * thresholds.
 *
 * The main aim here is to lower them aggressively if there is a lot of mapped
 * memory around. To avoid stressing page reclaim with lots of unreclaimable
 * pages. It is better to clamp down on writers than to start swapping, and
 * performing lots of scanning.
 *
 * We only allow 1/2 of the currently-unmapped memory to be dirtied.
 *
 * We don't permit the clamping level to fall below 5% - that is getting rather
 * excessive.
 *
 * We make sure that the background writeout level is below the adjusted
 * clamping level.
 */

static unsigned long highmem_dirtyable_memory(unsigned long total)
{
#ifdef CONFIG_HIGHMEM
	int node;
	unsigned long x = 0;

	for_each_node_state(node, N_HIGH_MEMORY) {
		struct zone *z =
			&NODE_DATA(node)->node_zones[ZONE_HIGHMEM];

		x += zone_page_state(z, NR_FREE_PAGES) +
		     zone_reclaimable_pages(z);
	}
	/*
	 * Make sure that the number of highmem pages is never larger
	 * than the number of the total dirtyable memory. This can only
	 * occur in very strange VM situations but we want to make sure
	 * that this does not occur.
	 */
	return min(x, total);
#else
	return 0;
#endif
}

/**
 * determine_dirtyable_memory - amount of memory that may be used
 *
 * Returns the number of pages that can currently be freed and used
 * by the kernel for direct mappings.
 */
unsigned long determine_dirtyable_memory(void)
{
	unsigned long x;

	x = global_page_state(NR_FREE_PAGES) + global_reclaimable_pages();

	if (!vm_highmem_is_dirtyable)
		x -= highmem_dirtyable_memory(x);

	return x + 1;	/* Ensure that we never return 0 */
}

/*
 * global_dirty_limits - background-writeback and dirty-throttling thresholds
 *
 * Calculate the dirty thresholds based on sysctl parameters
 * - vm.dirty_background_ratio or vm.dirty_background_bytes
 * - vm.dirty_ratio or vm.dirty_bytes
 * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
 * real-time tasks.
 */
void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
{
	unsigned long background;
	unsigned long dirty;
	unsigned long uninitialized_var(available_memory);
	struct task_struct *tsk;

	if (!vm_dirty_bytes || !dirty_background_bytes)
		available_memory = determine_dirtyable_memory();

	if (vm_dirty_bytes)
		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
	else
		dirty = (vm_dirty_ratio * available_memory) / 100;

	if (dirty_background_bytes)
		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
	else
		background = (dirty_background_ratio * available_memory) / 100;

	if (background >= dirty)
		background = dirty / 2;
	tsk = current;
	if (tsk->flags & PF_LESS_THROTTLE || rt_task(tsk)) {
		background += background / 4;
		dirty += dirty / 4;
	}
	*pbackground = background;
	*pdirty = dirty;
}

/*
 * bdi_dirty_limit - @bdi's share of dirty throttling threshold
 *
 * Allocate high/low dirty limits to fast/slow devices, in order to prevent
 * - starving fast devices
 * - piling up dirty pages (that will take long time to sync) on slow devices
 *
 * The bdi's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 */
unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
{
	u64 bdi_dirty;
	long numerator, denominator;

	/*
	 * Calculate this BDI's share of the dirty ratio.
	 */
	bdi_writeout_fraction(bdi, &numerator, &denominator);

	bdi_dirty = (dirty * (100 - bdi_min_ratio)) / 100;
	bdi_dirty *= numerator;
	do_div(bdi_dirty, denominator);

	bdi_dirty += (dirty * bdi->min_ratio) / 100;
	if (bdi_dirty > (dirty * bdi->max_ratio) / 100)
		bdi_dirty = dirty * bdi->max_ratio / 100;

	return bdi_dirty;
}

/*
 * balance_dirty_pages() must be called by processes which are generating dirty
 * data. It looks at the number of dirty pages in the machine and will force
 * the caller to perform writeback if the system is over `vm_dirty_ratio'.
 * If we're over `background_thresh' then the writeback threads are woken to
 * perform some writeout.
 */
static void balance_dirty_pages(struct address_space *mapping,
				unsigned long write_chunk)
{
	long nr_reclaimable, bdi_nr_reclaimable;
	long nr_writeback, bdi_nr_writeback;
	unsigned long background_thresh;
	unsigned long dirty_thresh;
	unsigned long bdi_thresh;
	unsigned long pages_written = 0;
	unsigned long pause = 1;
	bool dirty_exceeded = false;
	struct backing_dev_info *bdi = mapping->backing_dev_info;

	for (;;) {
		struct writeback_control wbc = {
			.sync_mode	= WB_SYNC_NONE,
			.older_than_this = NULL,
			.nr_to_write	= write_chunk,
			.range_cyclic	= 1,
		};

		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
					global_page_state(NR_UNSTABLE_NFS);
		nr_writeback = global_page_state(NR_WRITEBACK);

		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Throttle it only when the background writeback cannot
		 * catch-up. This avoids (excessively) small writeouts
		 * when the bdi limits are ramping up.
		 */
		if (nr_reclaimable + nr_writeback <=
				(background_thresh + dirty_thresh) / 2)
			break;

		bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
		bdi_thresh = task_dirty_limit(current, bdi_thresh);

		/*
		 * In order to avoid the stacked BDI deadlock we need
		 * to ensure we accurately count the 'dirty' pages when
		 * the threshold is low.
		 *
		 * Otherwise it would be possible to get thresh+n pages
		 * reported dirty, even though there are thresh-m pages
		 * actually dirty; with m+n sitting in the percpu
		 * deltas.
		 */
		if (bdi_thresh < 2*bdi_stat_error(bdi)) {
			bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
		} else {
			bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
			bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
		}

		/*
		 * The bdi thresh is somewhat of a "soft" limit derived from
		 * the global "hard" limit. The former helps to prevent a
		 * heavy-IO bdi or process from holding back light ones; the
		 * latter is the last-resort safeguard.
		 */
		dirty_exceeded =
			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
			|| (nr_reclaimable + nr_writeback > dirty_thresh);

		if (!dirty_exceeded)
			break;

		if (!bdi->dirty_exceeded)
			bdi->dirty_exceeded = 1;

		/* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
		 * Unstable writes are a feature of certain networked
		 * filesystems (i.e. NFS) in which data may have been
		 * written to the server's write cache, but has not yet
		 * been flushed to permanent storage.
		 * Only move pages to writeback if this bdi is over its
		 * threshold otherwise wait until the disk writes catch
		 * up.
		 */
		trace_wbc_balance_dirty_start(&wbc, bdi);
		if (bdi_nr_reclaimable > bdi_thresh) {
			writeback_inodes_wb(&bdi->wb, &wbc);
			pages_written += write_chunk - wbc.nr_to_write;
			trace_wbc_balance_dirty_written(&wbc, bdi);
			if (pages_written >= write_chunk)
				break;		/* We've done our duty */
		}
		trace_wbc_balance_dirty_wait(&wbc, bdi);
		__set_current_state(TASK_UNINTERRUPTIBLE);
		io_schedule_timeout(pause);

		/*
		 * Increase the delay for each loop, up to our previous
		 * default of taking a 100ms nap.
		 */
		pause <<= 1;
		if (pause > HZ / 10)
			pause = HZ / 10;
	}

	if (!dirty_exceeded && bdi->dirty_exceeded)
		bdi->dirty_exceeded = 0;

	if (writeback_in_progress(bdi))
		return;

	/*
	 * In laptop mode, we wait until hitting the higher threshold before
	 * starting background writeout, and then write out all the way down
	 * to the lower threshold. So slow writers cause minimal disk activity.
	 *
	 * In normal mode, we start background writeout at the lower
	 * background_thresh, to keep the amount of dirty memory low.
	 */
	if ((laptop_mode && pages_written) ||
	    (!laptop_mode && (nr_reclaimable > background_thresh)))
		bdi_start_background_writeback(bdi);
}

void set_page_dirty_balance(struct page *page, int page_mkwrite)
{
	if (set_page_dirty(page) || page_mkwrite) {
		struct address_space *mapping = page_mapping(page);

		if (mapping)
			balance_dirty_pages_ratelimited(mapping);
	}
}

static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0;

/**
 * balance_dirty_pages_ratelimited_nr - balance dirty memory state
 * @mapping: address_space which was dirtied
 * @nr_pages_dirtied: number of pages which the caller has just dirtied
 *
 * Processes which are dirtying memory should call in here once for each page
 * which was newly dirtied. The function will periodically check the system's
 * dirty state and will initiate writeback if needed.
 *
 * On really big machines, get_writeback_state is expensive, so try to avoid
 * calling it too often (ratelimiting). But once we're over the dirty memory
 * limit we decrease the ratelimiting by a lot, to prevent individual processes
 * from overshooting the limit by (ratelimit_pages) each.
 */
void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
					unsigned long nr_pages_dirtied)
{
	unsigned long ratelimit;
	unsigned long *p;

	ratelimit = ratelimit_pages;
	if (mapping->backing_dev_info->dirty_exceeded)
		ratelimit = 8;

	/*
	 * Check the rate limiting. Also, we do not want to throttle real-time
	 * tasks in balance_dirty_pages(). Period.
	 */
	preempt_disable();
	p = &__get_cpu_var(bdp_ratelimits);
	*p += nr_pages_dirtied;
	if (unlikely(*p >= ratelimit)) {
		ratelimit = sync_writeback_pages(*p);
		*p = 0;
		preempt_enable();
		balance_dirty_pages(mapping, ratelimit);
		return;
	}
	preempt_enable();
}
EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);

void throttle_vm_writeout(gfp_t gfp_mask)
{
	unsigned long background_thresh;
	unsigned long dirty_thresh;

	for ( ; ; ) {
		global_dirty_limits(&background_thresh, &dirty_thresh);

		/*
		 * Boost the allowable dirty threshold a bit for page
		 * allocators so they don't get DoS'ed by heavy writers
		 */
		dirty_thresh += dirty_thresh / 10;	/* wheeee... */

		if (global_page_state(NR_UNSTABLE_NFS) +
			global_page_state(NR_WRITEBACK) <= dirty_thresh)
				break;
		congestion_wait(BLK_RW_ASYNC, HZ/10);

		/*
		 * The caller might hold locks which can prevent IO completion
		 * or progress in the filesystem. So we cannot just sit here
		 * waiting for IO to complete.
		 */
		if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
			break;
	}
}

/*
 * sysctl handler for /proc/sys/vm/dirty_writeback_centisecs
 */
int dirty_writeback_centisecs_handler(ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	proc_dointvec(table, write, buffer, length, ppos);
	bdi_arm_supers_timer();
	return 0;
}

#ifdef CONFIG_BLOCK
void laptop_mode_timer_fn(unsigned long data)
{
	struct request_queue *q = (struct request_queue *)data;
	int nr_pages = global_page_state(NR_FILE_DIRTY) +
		global_page_state(NR_UNSTABLE_NFS);

	/*
	 * We want to write everything out, not just down to the dirty
	 * threshold
	 */
	if (bdi_has_dirty_io(&q->backing_dev_info))
		bdi_start_writeback(&q->backing_dev_info, nr_pages);
}

/*
 * We've spun up the disk and we're in laptop mode: schedule writeback
 * of all dirty data a few seconds from now. If the flush is already scheduled
 * then push it back - the user is still using the disk.
 */
void laptop_io_completion(struct backing_dev_info *info)
{
	mod_timer(&info->laptop_mode_wb_timer, jiffies + laptop_mode);
}

/*
 * We're in laptop mode and we've just synced. The sync's writes will have
 * caused another writeback to be scheduled by laptop_io_completion.
 * Nothing needs to be written back anymore, so we unschedule the writeback.
 */
void laptop_sync_completion(void)
{
	struct backing_dev_info *bdi;

	rcu_read_lock();

	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list)
		del_timer(&bdi->laptop_mode_wb_timer);

	rcu_read_unlock();
}
#endif

/*
 * If ratelimit_pages is too high then we can get into dirty-data overload
 * if a large number of processes all perform writes at the same time.
 * If it is too low then SMP machines will call the (expensive)
 * get_writeback_state too often.
 *
 * Here we set ratelimit_pages to a level which ensures that when all CPUs are
 * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory
 * thresholds before writeback cuts in.
 *
 * But the limit should not be set too high. Because it also controls the
 * amount of memory which the balance_dirty_pages() caller has to write back.
 * If this is too large then the caller will block on the IO queue all the
 * time. So limit it to four megabytes - the balance_dirty_pages() caller
 * will write six megabyte chunks, max.
 */

void writeback_set_ratelimit(void)
{
	ratelimit_pages = vm_total_pages / (num_online_cpus() * 32);
	if (ratelimit_pages < 16)
		ratelimit_pages = 16;
	if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024)
		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
}

static int __cpuinit
ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
{
	writeback_set_ratelimit();
	return NOTIFY_DONE;
}

static struct notifier_block __cpuinitdata ratelimit_nb = {
	.notifier_call	= ratelimit_handler,
	.next		= NULL,
};

/*
 * Called early on to tune the page writeback dirty limits.
 *
 * We used to scale dirty pages according to how total memory
 * related to pages that could be allocated for buffers (by
 * comparing nr_free_buffer_pages() to vm_total_pages).
 *
 * However, that was when we used "dirty_ratio" to scale with
 * all memory, and we don't do that any more. "dirty_ratio"
 * is now applied to total non-HIGHPAGE memory (by subtracting
 * totalhigh_pages from vm_total_pages), and as such we can't
 * get into the old insane situation any more where we had
 * large amounts of dirty pages compared to a small amount of
 * non-HIGHMEM memory.
 *
 * But we might still want to scale the dirty_ratio by how
 * much memory the box has.
 */
void __init page_writeback_init(void)
{
	int shift;

	writeback_set_ratelimit();
	register_cpu_notifier(&ratelimit_nb);

	shift = calc_period_shift();
	prop_descriptor_init(&vm_completions, shift);
	prop_descriptor_init(&vm_dirties, shift);
}

/**
 * tag_pages_for_writeback - tag pages to be written by write_cache_pages
 * @mapping: address space structure to write
 * @start: starting page index
 * @end: ending page index (inclusive)
 *
 * This function scans the page range from @start to @end (inclusive) and tags
 * all pages that have DIRTY tag set with a special TOWRITE tag. The idea is
 * that write_cache_pages (or whoever calls this function) will then use
 * TOWRITE tag to identify pages eligible for writeback. This mechanism is
 * used to avoid livelocking of writeback by a process steadily creating new
 * dirty pages in the file (thus it is important for this function to be quick
 * so that it can tag pages faster than a dirtying process can create them).
 */
/*
 * We tag pages in batches of WRITEBACK_TAG_BATCH to reduce tree_lock latency.
 */
void tag_pages_for_writeback(struct address_space *mapping,
			     pgoff_t start, pgoff_t end)
{
#define WRITEBACK_TAG_BATCH 4096
	unsigned long tagged;

	do {
		spin_lock_irq(&mapping->tree_lock);
		tagged = radix_tree_range_tag_if_tagged(&mapping->page_tree,
				&start, end, WRITEBACK_TAG_BATCH,
				PAGECACHE_TAG_DIRTY, PAGECACHE_TAG_TOWRITE);
		spin_unlock_irq(&mapping->tree_lock);
		WARN_ON_ONCE(tagged > WRITEBACK_TAG_BATCH);
		cond_resched();
		/* We check 'start' to handle wrapping when end == ~0UL */
	} while (tagged >= WRITEBACK_TAG_BATCH && start);
}
EXPORT_SYMBOL(tag_pages_for_writeback);

/**
 * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 * @writepage: function called for each page
 * @data: data passed to writepage function
 *
 * If a page is already under I/O, write_cache_pages() skips it, even
 * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
 * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
 * and msync() need to guarantee that all the data which was dirty at the time
 * the call was made get new I/O started against them. If wbc->sync_mode is
 * WB_SYNC_ALL then we were called for data integrity and we must wait for
 * existing IO to complete.
 *
 * To avoid livelocks (when other process dirties new pages), we first tag
 * pages which should be written back with TOWRITE tag and only then start
 * writing them. For data-integrity sync we have to be careful so that we do
 * not miss some pages (e.g., because some other process has cleared TOWRITE
 * tag we set). The rule we follow is that TOWRITE tag can be cleared only
 * by the process clearing the DIRTY tag (and submitting the page for IO).
 */
int write_cache_pages(struct address_space *mapping,
		      struct writeback_control *wbc, writepage_t writepage,
		      void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;

	pagevec_init(&pvec, 0);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			/*
			 * At this point, the page may be truncated or
			 * invalidated (changing page->mapping to NULL), or
			 * even swizzled back from swapper_space to tmpfs file
			 * mapping. However, page->index will not change
			 * because we have a reference on the page.
			 */
			if (page->index > end) {
				/*
				 * can't be range_cyclic (1st pass) because
				 * end == -1 in that case.
				 */
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/*
			 * Page truncated or invalidated. We can freely skip it
			 * then, even for data integrity operations: the page
			 * has disappeared concurrently, so there could be no
			 * real expectation of this data integrity operation
			 * even if there is now a new, dirty page at the same
			 * pagecache address.
			 */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					wait_on_page_writeback(page);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			trace_wbc_writepage(wbc, mapping->backing_dev_info);
			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					unlock_page(page);
					ret = 0;
				} else {
					/*
					 * done_index is set past this page,
					 * so media errors will not choke
					 * background writeout for the entire
					 * file. This has consequences for
					 * range_cyclic semantics (ie. it may
					 * not be suitable for data integrity
					 * writeout).
					 */
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			/*
			 * We stop writing back only if we are not doing
			 * integrity sync. In case of integrity sync we have to
			 * keep going until we have written all the pages
			 * we tagged for writeback prior to entering this loop.
			 */
			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}
EXPORT_SYMBOL(write_cache_pages);

/*
 * Function used by generic_writepages to call the real writepage
 * function and set the mapping flags on error
 */
static int __writepage(struct page *page, struct writeback_control *wbc,
		       void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/**
 * generic_writepages - walk the list of dirty pages of the given address space and writepage() all of them.
 * @mapping: address space structure to write
 * @wbc: subtract the number of written pages from *@wbc->nr_to_write
 *
 * This is a library function, which implements the writepages()
 * address_space_operation.
 */
int generic_writepages(struct address_space *mapping,
		       struct writeback_control *wbc)
{
	struct blk_plug plug;
	int ret;

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __writepage, mapping);
	blk_finish_plug(&plug);
	return ret;
}

EXPORT_SYMBOL(generic_writepages);

int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	int ret;

	if (wbc->nr_to_write <= 0)
		return 0;
	if (mapping->a_ops->writepages)
		ret = mapping->a_ops->writepages(mapping, wbc);
	else
		ret = generic_writepages(mapping, wbc);
	return ret;
}

/**
 * write_one_page - write out a single page and optionally wait on I/O
 * @page: the page to write
 * @wait: if true, wait on writeout
 *
 * The page must be locked by the caller and will be unlocked upon return.
 *
 * write_one_page() returns a negative error code if I/O failed.
 */
int write_one_page(struct page *page, int wait)
{
	struct address_space *mapping = page->mapping;
	int ret = 0;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 1,
	};

	BUG_ON(!PageLocked(page));

	if (wait)
		wait_on_page_writeback(page);

	if (clear_page_dirty_for_io(page)) {
		page_cache_get(page);
		ret = mapping->a_ops->writepage(page, &wbc);
		if (ret == 0 && wait) {
			wait_on_page_writeback(page);
			if (PageError(page))
				ret = -EIO;
		}
		page_cache_release(page);
	} else {
		unlock_page(page);
	}
	return ret;
}
EXPORT_SYMBOL(write_one_page);

/*
 * For address_spaces which do not use buffers nor write back.
 */
int __set_page_dirty_no_writeback(struct page *page)
{
	if (!PageDirty(page))
		return !TestSetPageDirty(page);
	return 0;
}

/*
 * Helper function for set_page_dirty family.
 * NOTE: This relies on being atomic wrt interrupts.
 */
void account_page_dirtied(struct page *page, struct address_space *mapping)
{
	if (mapping_cap_account_dirty(mapping)) {
		__inc_zone_page_state(page, NR_FILE_DIRTY);
		__inc_zone_page_state(page, NR_DIRTIED);
		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
		task_dirty_inc(current);
		task_io_account_write(PAGE_CACHE_SIZE);
	}
}
EXPORT_SYMBOL(account_page_dirtied);

/*
 * Helper function for set_page_writeback family.
 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
 * wrt interrupts.
 */
void account_page_writeback(struct page *page)
{
	inc_zone_page_state(page, NR_WRITEBACK);
	inc_zone_page_state(page, NR_WRITTEN);
}
EXPORT_SYMBOL(account_page_writeback);

/*
 * For address_spaces which do not use buffers. Just tag the page as dirty in
 * its radix tree.
 *
 * This is also used when a single buffer is being dirtied: we want to set the
 * page dirty in that case, but not all the buffers. This is a "bottom-up"
 * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
 *
 * Most callers have locked the page, which pins the address_space in memory.
 * But zap_pte_range() does not lock the page, however in that case the
 * mapping is pinned by the vma's ->vm_file reference.
 *
 * We take care to handle the case where the page was truncated from the
 * mapping by re-checking page_mapping() inside tree_lock.
 */
int __set_page_dirty_nobuffers(struct page *page)
{
	if (!TestSetPageDirty(page)) {
		struct address_space *mapping = page_mapping(page);
		struct address_space *mapping2;

		if (!mapping)
			return 1;

		spin_lock_irq(&mapping->tree_lock);
		mapping2 = page_mapping(page);
		if (mapping2) { /* Race with truncate? */
			BUG_ON(mapping2 != mapping);
			WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
			account_page_dirtied(page, mapping);
			radix_tree_tag_set(&mapping->page_tree,
				page_index(page), PAGECACHE_TAG_DIRTY);
		}
		spin_unlock_irq(&mapping->tree_lock);
		if (mapping->host) {
			/* !PageAnon && !swapper_space */
			__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
		}
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(__set_page_dirty_nobuffers);

/*
 * When a writepage implementation decides that it doesn't want to write this
 * page for some reason, it should redirty the locked page via
 * redirty_page_for_writepage() and it should then unlock the page and return 0
 */
int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page)
{
	wbc->pages_skipped++;
	return __set_page_dirty_nobuffers(page);
}
EXPORT_SYMBOL(redirty_page_for_writepage);

/*
 * Dirty a page.
 *
 * For pages with a mapping this should be done under the page lock
 * for the benefit of asynchronous memory errors, which prefer a consistent
 * dirty state. This rule can be broken in some special cases,
 * but it is better not to.
 *
 * If the mapping doesn't provide a set_page_dirty a_op, then
 * just fall through and assume that it wants buffer_heads.
 */
int set_page_dirty(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	if (likely(mapping)) {
		int (*spd)(struct page *) = mapping->a_ops->set_page_dirty;
		/*
		 * readahead/lru_deactivate_page could remain
		 * PG_readahead/PG_reclaim due to race with end_page_writeback
		 * About readahead, if the page is written, the flags would be
		 * reset. So no problem.
		 * About lru_deactivate_page, if the page is redirtied, the flag
		 * will be reset. So no problem. But if the page is used by
		 * readahead it will confuse readahead and make it restart the
		 * size rampup process. That is a trivial problem, though.
		 */
		ClearPageReclaim(page);
#ifdef CONFIG_BLOCK
		if (!spd)
			spd = __set_page_dirty_buffers;
#endif
		return (*spd)(page);
	}
	if (!PageDirty(page)) {
		if (!TestSetPageDirty(page))
			return 1;
	}
	return 0;
}
EXPORT_SYMBOL(set_page_dirty);

/*
 * set_page_dirty() is racy if the caller has no reference against
 * page->mapping->host, and if the page is unlocked. This is because another
 * CPU could truncate the page off the mapping and then free the mapping.
 *
 * Usually, the page _is_ locked, or the caller is a user-space process which
 * holds a reference on the inode by having an open file.
 *
 * In other cases, the page should be locked before running set_page_dirty().
 */
int set_page_dirty_lock(struct page *page)
{
	int ret;

	lock_page(page);
	ret = set_page_dirty(page);
	unlock_page(page);
	return ret;
}
EXPORT_SYMBOL(set_page_dirty_lock);

/*
 * Clear a page's dirty flag, while caring for dirty memory accounting.
 * Returns true if the page was previously dirty.
 *
 * This is for preparing to put the page under writeout. We leave the page
 * tagged as dirty in the radix tree so that a concurrent write-for-sync
 * can discover it via a PAGECACHE_TAG_DIRTY walk. The ->writepage
 * implementation will run either set_page_writeback() or set_page_dirty(),
 * at which stage we bring the page's dirty flag and radix-tree dirty tag
 * back into sync.
 *
 * This incoherency between the page's dirty flag and radix-tree tag is
 * unfortunate, but it only exists while the page is locked.
 */
int clear_page_dirty_for_io(struct page *page)
{
	struct address_space *mapping = page_mapping(page);

	BUG_ON(!PageLocked(page));

	if (mapping && mapping_cap_account_dirty(mapping)) {
		/*
		 * Yes, Virginia, this is indeed insane.
		 *
		 * We use this sequence to make sure that
		 *  (a) we account for dirty stats properly
		 *  (b) we tell the low-level filesystem to
		 *      mark the whole page dirty if it was
		 *      dirty in a pagetable. Only to then
		 *  (c) clean the page again and return 1 to
		 *      cause the writeback.
		 *
		 * This way we avoid all nasty races with the
		 * dirty bit in multiple places and clearing
		 * them concurrently from different threads.
		 *
		 * Note! Normally the "set_page_dirty(page)"
		 * has no effect on the actual dirty bit - since
		 * that will already usually be set. But we
		 * need the side effects, and it can help us
		 * avoid races.
		 *
		 * We basically use the page "master dirty bit"
		 * as a serialization point for all the different
		 * threads doing their things.
		 */
		if (page_mkclean(page))
			set_page_dirty(page);
		/*
		 * We carefully synchronise fault handlers against
		 * installing a dirty pte and marking the page dirty
		 * at this point. We do this by having them hold the
		 * page lock at some point after installing their
		 * pte, but before marking the page dirty.
		 * Pages are always locked coming in here, so we get
		 * the desired exclusion. See mm/memory.c:do_wp_page()
		 * for more comments.
		 */
		if (TestClearPageDirty(page)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			return 1;
		}
		return 0;
	}
	return TestClearPageDirty(page);
}
EXPORT_SYMBOL(clear_page_dirty_for_io);

int test_clear_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestClearPageWriteback(page);
		if (ret) {
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi)) {
				__dec_bdi_stat(bdi, BDI_WRITEBACK);
				__bdi_writeout_inc(bdi);
			}
		}
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestClearPageWriteback(page);
	}
	if (ret)
		dec_zone_page_state(page, NR_WRITEBACK);
	return ret;
}

int test_set_page_writeback(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	int ret;

	if (mapping) {
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		unsigned long flags;

		spin_lock_irqsave(&mapping->tree_lock, flags);
		ret = TestSetPageWriteback(page);
		if (!ret) {
			radix_tree_tag_set(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_WRITEBACK);
			if (bdi_cap_account_writeback(bdi))
				__inc_bdi_stat(bdi, BDI_WRITEBACK);
		}
		if (!PageDirty(page))
			radix_tree_tag_clear(&mapping->page_tree,
						page_index(page),
						PAGECACHE_TAG_DIRTY);
		radix_tree_tag_clear(&mapping->page_tree,
				     page_index(page),
				     PAGECACHE_TAG_TOWRITE);
		spin_unlock_irqrestore(&mapping->tree_lock, flags);
	} else {
		ret = TestSetPageWriteback(page);
	}
	if (!ret)
		account_page_writeback(page);
	return ret;
}
EXPORT_SYMBOL(test_set_page_writeback);

/*
 * Return true if any of the pages in the mapping are marked with the
 * passed tag.
 */
int mapping_tagged(struct address_space *mapping, int tag)
{
	int ret;
	rcu_read_lock();
	ret = radix_tree_tagged(&mapping->page_tree, tag);
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL(mapping_tagged);