/*
 * High-level sync()-related operations
 */

#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/writeback.h>
#include <linux/syscalls.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>

#define VALID_FLAGS (SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE| \
			SYNC_FILE_RANGE_WAIT_AFTER)

/*
 * sync everything.  Start out by waking pdflush, because that writes back
 * all queues in parallel.
 */
static void do_sync(unsigned long wait)
{
	wakeup_pdflush(0);
	sync_inodes(0);		/* All mappings, inodes and their blockdevs */
	DQUOT_SYNC(NULL);
	sync_supers();		/* Write the superblocks */
	sync_filesystems(0);	/* Start syncing the filesystems */
	sync_filesystems(wait);	/* Waitingly sync the filesystems */
	sync_inodes(wait);	/* Mappings, inodes and blockdevs, again. */
	if (!wait)
		printk("Emergency Sync complete\n");
	if (unlikely(laptop_mode))
		laptop_sync_completion();
}

asmlinkage long sys_sync(void)
{
	do_sync(1);
	return 0;
}

void emergency_sync(void)
{
	pdflush_operation(do_sync, 0);
}

/*
 * Generic function to fsync a file.
 *
 * filp may be NULL if called via the msync of a vma.
 */
int file_fsync(struct file *filp, struct dentry *dentry, int datasync)
{
	struct inode * inode = dentry->d_inode;
	struct super_block * sb;
	int ret, err;

	/* sync the inode to buffers */
	ret = write_inode_now(inode, 0);

	/* sync the superblock to buffers */
	sb = inode->i_sb;
	lock_super(sb);
	if (sb->s_op->write_super)
		sb->s_op->write_super(sb);
	unlock_super(sb);

	/* .. finally sync the buffers to disk */
	err = sync_blockdev(sb->s_bdev);
	if (!ret)
		ret = err;
	return ret;
}

long do_fsync(struct file *file, int datasync)
{
	int ret;
	int err;
	struct address_space *mapping = file->f_mapping;

	if (!file->f_op || !file->f_op->fsync) {
		/* Why?  We can still call filemap_fdatawrite */
		ret = -EINVAL;
		goto out;
	}

	ret = filemap_fdatawrite(mapping);

	/*
	 * We need to protect against concurrent writers, which could cause
	 * livelocks in fsync_buffers_list().
	 */
	mutex_lock(&mapping->host->i_mutex);
	err = file->f_op->fsync(file, file->f_dentry, datasync);
	if (!ret)
		ret = err;
	mutex_unlock(&mapping->host->i_mutex);
	err = filemap_fdatawait(mapping);
	if (!ret)
		ret = err;
out:
	return ret;
}

static long __do_fsync(unsigned int fd, int datasync)
{
	struct file *file;
	int ret = -EBADF;

	file = fget(fd);
	if (file) {
		ret = do_fsync(file, datasync);
		fput(file);
	}
	return ret;
}

asmlinkage long sys_fsync(unsigned int fd)
{
	return __do_fsync(fd, 0);
}

asmlinkage long sys_fdatasync(unsigned int fd)
{
	return __do_fsync(fd, 1);
}

/*
 * sys_sync_file_range() permits finely controlled syncing over a segment of
 * a file in the range offset .. (offset+nbytes-1) inclusive.  If nbytes is
 * zero then sys_sync_file_range() will operate from offset out to EOF.
 *
 * The flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE: wait upon writeout of all pages in the range
 * before performing the write.
 *
 * SYNC_FILE_RANGE_WRITE: initiate writeout of all those dirty pages in the
 * range which are not presently under writeback.
 *
 * SYNC_FILE_RANGE_WAIT_AFTER: wait upon writeout of all pages in the range
 * after performing the write.
 *
 * Useful combinations of the flag bits are:
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE: ensures that all pages
 * in the range which were dirty on entry to sys_sync_file_range() are placed
 * under writeout.  This is a start-write-for-data-integrity operation.
 *
 * SYNC_FILE_RANGE_WRITE: start writeout of all dirty pages in the range which
 * are not presently under writeout.  This is an asynchronous flush-to-disk
 * operation.  Not suitable for data integrity operations.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE (or SYNC_FILE_RANGE_WAIT_AFTER): wait for
 * completion of writeout of all pages in the range.  This will be used after an
 * earlier SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE operation to wait
 * for that operation to complete and to return the result.
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE|SYNC_FILE_RANGE_WRITE|SYNC_FILE_RANGE_WAIT_AFTER:
 * a traditional sync() operation.  This is a write-for-data-integrity operation
 * which will ensure that all pages in the range which were dirty on entry to
 * sys_sync_file_range() are committed to disk.
 *
 *
 * SYNC_FILE_RANGE_WAIT_BEFORE and SYNC_FILE_RANGE_WAIT_AFTER will detect any
 * I/O errors or ENOSPC conditions and will return those to the caller, after
 * clearing the EIO and ENOSPC flags in the address_space.
 *
 * It should be noted that none of these operations write out the file's
 * metadata.  So unless the application is strictly performing overwrites of
 * already-instantiated disk blocks, there are no guarantees here that the data
 * will be available after a crash.
 */
asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
					unsigned int flags)
{
	int ret;
	struct file *file;
	loff_t endbyte;			/* inclusive */
	int fput_needed;
	umode_t i_mode;

	ret = -EINVAL;
	if (flags & ~VALID_FLAGS)
		goto out;

	endbyte = offset + nbytes;

	if ((s64)offset < 0)
		goto out;
	if ((s64)endbyte < 0)
		goto out;
	if (endbyte < offset)
		goto out;

	if (sizeof(pgoff_t) == 4) {
		if (offset >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * The range starts outside a 32 bit machine's
			 * pagecache addressing capabilities.  Let it "succeed"
			 */
			ret = 0;
			goto out;
		}
		if (endbyte >= (0x100000000ULL << PAGE_CACHE_SHIFT)) {
			/*
			 * Out to EOF
			 */
			nbytes = 0;
		}
	}

	if (nbytes == 0)
		endbyte = LLONG_MAX;
	else
		endbyte--;		/* inclusive */

	ret = -EBADF;
	file = fget_light(fd, &fput_needed);
	if (!file)
		goto out;

	i_mode = file->f_dentry->d_inode->i_mode;
	ret = -ESPIPE;
	if (!S_ISREG(i_mode) && !S_ISBLK(i_mode) && !S_ISDIR(i_mode) &&
			!S_ISLNK(i_mode))
		goto out_put;

	ret = do_sync_file_range(file, offset, endbyte, flags);
out_put:
	fput_light(file, fput_needed);
out:
	return ret;
}

/*
 * `endbyte' is inclusive
 */
int do_sync_file_range(struct file *file, loff_t offset, loff_t endbyte,
			unsigned int flags)
{
	int ret;
	struct address_space *mapping;

	mapping = file->f_mapping;
	if (!mapping) {
		ret = -EINVAL;
		goto out;
	}

	ret = 0;
	if (flags & SYNC_FILE_RANGE_WAIT_BEFORE) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WRITE) {
		ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
						WB_SYNC_NONE);
		if (ret < 0)
			goto out;
	}

	if (flags & SYNC_FILE_RANGE_WAIT_AFTER) {
		ret = wait_on_page_writeback_range(mapping,
					offset >> PAGE_CACHE_SHIFT,
					endbyte >> PAGE_CACHE_SHIFT);
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(do_sync_file_range);
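
/*
 * Usage sketch for the flag combinations documented above, as seen from
 * userspace.  This is a minimal, illustrative example and makes a few
 * assumptions: a libc that exposes the sync_file_range() wrapper (glibc
 * requires _GNU_SOURCE), plus a placeholder file name and range size.
 * As noted above, file metadata is not written, so this only helps for
 * overwrites of already-instantiated disk blocks.
 */
#if 0	/* illustrative userspace code, kept out of the kernel build */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("datafile", O_WRONLY);	/* placeholder path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* ... overwrite some already-allocated region of the file ... */

	/*
	 * Start-write-for-data-integrity: every page in the first 1MB that
	 * was dirty at this point is placed under writeout.
	 */
	if (sync_file_range(fd, 0, 1 << 20,
			SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE))
		perror("sync_file_range (start writeout)");

	/* ... overlap other work with the in-flight writeback ... */

	/*
	 * Wait for that writeout to complete and collect any EIO/ENOSPC
	 * result.
	 */
	if (sync_file_range(fd, 0, 1 << 20, SYNC_FILE_RANGE_WAIT_AFTER))
		perror("sync_file_range (wait)");

	close(fd);
	return 0;
}
#endif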