author     Linus Torvalds <torvalds@linux-foundation.org>	2022-05-23 13:56:39 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>	2022-05-23 13:56:39 -0700
commit     115cd47132d71bd7e4aa1093e15d861a59e73a94 (patch)
tree       42e457126de728c9328e4b9b09b5ca4852a590de /include
parent     f6792c877a1cacc3b3eea7cb5b45857b3c484c51 (diff)
parent     2aaf516084184e4e6f80da01b2b3ed882fd20a79 (diff)
download   nf-115cd47132d71bd7e4aa1093e15d861a59e73a94.tar.gz
Merge tag 'for-5.19/block-2022-05-22' of git://git.kernel.dk/linux-block
Pull block updates from Jens Axboe:
 "Here are the core block changes for 5.19. This contains:

   - blk-throttle accounting fix (Laibin)

   - Series removing redundant assignments (Michal)

   - Expose bio cache via the bio_set, so that DM can use it (Mike)

   - Finish off the bio allocation interface cleanups by dealing with
     the weirdest member of the family. bio_kmalloc combines a kmalloc
     for the bio and bio_vecs with a hidden bio_init call and magic
     cleanup semantics (Christoph)

   - Clean up the block layer API so that APIs consumed by file systems
     are (almost) only struct block_device based, so that file systems
     don't have to poke into block layer internals like the
     request_queue (Christoph)

   - Clean up the blk_execute_rq* API (Christoph)

   - Clean up various loose ends in the blk-cgroup code to make it
     easier to follow in preparation of reworking the blkcg assignment
     for bios (Christoph)

   - Fix use-after-free issues in BFQ when processes with merged queues
     get moved to different cgroups (Jan)

   - BFQ fixes (Jan)

   - Various fixes and cleanups (Bart, Chengming, Fanjun, Julia, Ming,
     Wolfgang, me)"

* tag 'for-5.19/block-2022-05-22' of git://git.kernel.dk/linux-block: (83 commits)
  blk-mq: fix typo in comment
  bfq: Remove bfq_requeue_request_body()
  bfq: Remove superfluous conversion from RQ_BIC()
  bfq: Allow current waker to defend against a tentative one
  bfq: Relax waker detection for shared queues
  blk-cgroup: delete rcu_read_lock_held() WARN_ON_ONCE()
  blk-throttle: Set BIO_THROTTLED when bio has been throttled
  blk-cgroup: Remove unnecessary rcu_read_lock/unlock()
  blk-cgroup: always terminate io.stat lines
  block, bfq: make bfq_has_work() more accurate
  block, bfq: protect 'bfqd->queued' by 'bfqd->lock'
  block: cleanup the VM accounting in submit_bio
  block: Fix the bio.bi_opf comment
  block: reorder the REQ_ flags
  blk-iocost: combine local_stat and desc_stat to stat
  block: improve the error message from bio_check_eod
  block: allow passing a NULL bdev to bio_alloc_clone/bio_init_clone
  block: remove superfluous calls to blkcg_bio_issue_init
  kthread: unexport kthread_blkcg
  blk-cgroup: cleanup blkcg_maybe_throttle_current
  ...
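The "struct block_device based" cleanup mentioned above shows up in the diff below as new bdev_* accessors and a flags-less blkdev_issue_discard(). A minimal, illustrative sketch of how a filesystem caller might use the reworked discard interface (the helper name fs_discard_range is hypothetical, not from this series):

/*
 * Illustrative sketch only: probe and issue a discard via the new
 * block_device based helpers instead of poking at the request_queue.
 */
static int fs_discard_range(struct block_device *bdev, sector_t start,
			    sector_t nr_sects)
{
	if (!bdev_max_discard_sectors(bdev))
		return -EOPNOTSUPP;	/* replaces blk_queue_discard(q) */

	/* the flags argument is gone; secure erase is a separate helper now */
	return blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL);
}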
Diffstat (limited to 'include')
-rw-r--r--  include/linux/backing-dev.h              6
-rw-r--r--  include/linux/bio.h                     10
-rw-r--r--  include/linux/blk-cgroup.h             258
-rw-r--r--  include/linux/blk_types.h               21
-rw-r--r--  include/linux/blkdev.h                 119
-rw-r--r--  include/linux/blktrace_api.h            10
-rw-r--r--  include/linux/kthread.h                  4
-rw-r--r--  include/target/target_core_backend.h     4
8 files changed, 88 insertions, 344 deletions
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 87ce24d238f345..2bd073fa6bb53e 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -17,8 +17,6 @@
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
-struct blkcg;
-
static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
kref_get(&bdi->refcnt);
@@ -154,7 +152,7 @@ struct bdi_writeback *wb_get_create(struct backing_dev_info *bdi,
struct cgroup_subsys_state *memcg_css,
gfp_t gfp);
void wb_memcg_offline(struct mem_cgroup *memcg);
-void wb_blkcg_offline(struct blkcg *blkcg);
+void wb_blkcg_offline(struct cgroup_subsys_state *css);
/**
* inode_cgwb_enabled - test whether cgroup writeback is enabled on an inode
@@ -378,7 +376,7 @@ static inline void wb_memcg_offline(struct mem_cgroup *memcg)
{
}
-static inline void wb_blkcg_offline(struct blkcg *blkcg)
+static inline void wb_blkcg_offline(struct cgroup_subsys_state *css)
{
}
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 00450fd86bb437..1cf3738ef1ea6d 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -408,9 +408,7 @@ extern int bioset_init_from_src(struct bio_set *bs, struct bio_set *src);
struct bio *bio_alloc_bioset(struct block_device *bdev, unsigned short nr_vecs,
unsigned int opf, gfp_t gfp_mask,
struct bio_set *bs);
-struct bio *bio_alloc_kiocb(struct kiocb *kiocb, struct block_device *bdev,
- unsigned short nr_vecs, unsigned int opf, struct bio_set *bs);
-struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned short nr_iovecs);
+struct bio *bio_kmalloc(unsigned short nr_vecs, gfp_t gfp_mask);
extern void bio_put(struct bio *);
struct bio *bio_alloc_clone(struct block_device *bdev, struct bio *bio_src,
@@ -785,6 +783,12 @@ static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb)
bio->bi_opf |= REQ_NOWAIT;
}
+static inline void bio_clear_polled(struct bio *bio)
+{
+ /* can't support alloc cache if we turn off polling */
+ bio->bi_opf &= ~(REQ_POLLED | REQ_ALLOC_CACHE);
+}
+
struct bio *blk_next_bio(struct bio *bio, struct block_device *bdev,
unsigned int nr_pages, unsigned int opf, gfp_t gfp);
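With bio_alloc_kiocb() gone and bio_kmalloc() reduced to a bare allocation, callers now pair it with bio_init()/bio_uninit() and a plain kfree(). A hedged sketch of the new calling convention (error handling and page setup elided; nr_vecs and bdev are placeholders):

/* Sketch of the reworked bio_kmalloc() convention: allocation only,
 * explicit init/uninit and kfree() by the caller. */
bio = bio_kmalloc(nr_vecs, GFP_KERNEL);
if (!bio)
	return -ENOMEM;
bio_init(bio, bdev, bio->bi_inline_vecs, nr_vecs, REQ_OP_READ);
/* ... add pages, submit, wait for completion ... */
bio_uninit(bio);
kfree(bio);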
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 652cd05b0924c3..9f40dbc65f82c0 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -14,265 +14,39 @@
* Nauman Rafique <nauman@google.com>
*/
-#include <linux/cgroup.h>
-#include <linux/percpu.h>
-#include <linux/percpu_counter.h>
-#include <linux/u64_stats_sync.h>
-#include <linux/seq_file.h>
-#include <linux/radix-tree.h>
-#include <linux/blkdev.h>
-#include <linux/atomic.h>
-#include <linux/kthread.h>
-#include <linux/fs.h>
+#include <linux/types.h>
+
+struct bio;
+struct cgroup_subsys_state;
+struct request_queue;
#define FC_APPID_LEN 129
#ifdef CONFIG_BLK_CGROUP
-
-enum blkg_iostat_type {
- BLKG_IOSTAT_READ,
- BLKG_IOSTAT_WRITE,
- BLKG_IOSTAT_DISCARD,
-
- BLKG_IOSTAT_NR,
-};
-
-struct blkcg_gq;
-struct blkg_policy_data;
-
-struct blkcg {
- struct cgroup_subsys_state css;
- spinlock_t lock;
- refcount_t online_pin;
-
- struct radix_tree_root blkg_tree;
- struct blkcg_gq __rcu *blkg_hint;
- struct hlist_head blkg_list;
-
- struct blkcg_policy_data *cpd[BLKCG_MAX_POLS];
-
- struct list_head all_blkcgs_node;
-#ifdef CONFIG_BLK_CGROUP_FC_APPID
- char fc_app_id[FC_APPID_LEN];
-#endif
-#ifdef CONFIG_CGROUP_WRITEBACK
- struct list_head cgwb_list;
-#endif
-};
-
-struct blkg_iostat {
- u64 bytes[BLKG_IOSTAT_NR];
- u64 ios[BLKG_IOSTAT_NR];
-};
-
-struct blkg_iostat_set {
- struct u64_stats_sync sync;
- struct blkg_iostat cur;
- struct blkg_iostat last;
-};
-
-/* association between a blk cgroup and a request queue */
-struct blkcg_gq {
- /* Pointer to the associated request_queue */
- struct request_queue *q;
- struct list_head q_node;
- struct hlist_node blkcg_node;
- struct blkcg *blkcg;
-
- /* all non-root blkcg_gq's are guaranteed to have access to parent */
- struct blkcg_gq *parent;
-
- /* reference count */
- struct percpu_ref refcnt;
-
- /* is this blkg online? protected by both blkcg and q locks */
- bool online;
-
- struct blkg_iostat_set __percpu *iostat_cpu;
- struct blkg_iostat_set iostat;
-
- struct blkg_policy_data *pd[BLKCG_MAX_POLS];
-
- spinlock_t async_bio_lock;
- struct bio_list async_bios;
- union {
- struct work_struct async_bio_work;
- struct work_struct free_work;
- };
-
- atomic_t use_delay;
- atomic64_t delay_nsec;
- atomic64_t delay_start;
- u64 last_delay;
- int last_use;
-
- struct rcu_head rcu_head;
-};
-
extern struct cgroup_subsys_state * const blkcg_root_css;
-void blkcg_destroy_blkgs(struct blkcg *blkcg);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
-
-static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
-{
- return css ? container_of(css, struct blkcg, css) : NULL;
-}
-
-/**
- * bio_blkcg - grab the blkcg associated with a bio
- * @bio: target bio
- *
- * This returns the blkcg associated with a bio, %NULL if not associated.
- * Callers are expected to either handle %NULL or know association has been
- * done prior to calling this.
- */
-static inline struct blkcg *bio_blkcg(struct bio *bio)
-{
- if (bio && bio->bi_blkg)
- return bio->bi_blkg->blkcg;
- return NULL;
-}
-
-static inline bool blk_cgroup_congested(void)
-{
- struct cgroup_subsys_state *css;
- bool ret = false;
-
- rcu_read_lock();
- css = kthread_blkcg();
- if (!css)
- css = task_css(current, io_cgrp_id);
- while (css) {
- if (atomic_read(&css->cgroup->congestion_count)) {
- ret = true;
- break;
- }
- css = css->parent;
- }
- rcu_read_unlock();
- return ret;
-}
-
-/**
- * blkcg_parent - get the parent of a blkcg
- * @blkcg: blkcg of interest
- *
- * Return the parent blkcg of @blkcg. Can be called anytime.
- */
-static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
-{
- return css_to_blkcg(blkcg->css.parent);
-}
-
-/**
- * blkcg_pin_online - pin online state
- * @blkcg: blkcg of interest
- *
- * While pinned, a blkcg is kept online. This is primarily used to
- * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
- * while an associated cgwb is still active.
- */
-static inline void blkcg_pin_online(struct blkcg *blkcg)
-{
- refcount_inc(&blkcg->online_pin);
-}
-
-/**
- * blkcg_unpin_online - unpin online state
- * @blkcg: blkcg of interest
- *
- * This is primarily used to impedance-match blkg and cgwb lifetimes so
- * that blkg doesn't go offline while an associated cgwb is still active.
- * When this count goes to zero, all active cgwbs have finished so the
- * blkcg can continue destruction by calling blkcg_destroy_blkgs().
- */
-static inline void blkcg_unpin_online(struct blkcg *blkcg)
-{
- do {
- if (!refcount_dec_and_test(&blkcg->online_pin))
- break;
- blkcg_destroy_blkgs(blkcg);
- blkcg = blkcg_parent(blkcg);
- } while (blkcg);
-}
+bool blk_cgroup_congested(void);
+void blkcg_pin_online(struct cgroup_subsys_state *blkcg_css);
+void blkcg_unpin_online(struct cgroup_subsys_state *blkcg_css);
+struct list_head *blkcg_get_cgwb_list(struct cgroup_subsys_state *css);
+struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio);
#else /* CONFIG_BLK_CGROUP */
-struct blkcg {
-};
-
-struct blkcg_gq {
-};
-
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }
-
-#ifdef CONFIG_BLOCK
static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
-static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }
-#endif /* CONFIG_BLOCK */
-
-#endif /* CONFIG_BLK_CGROUP */
-
-#ifdef CONFIG_BLK_CGROUP_FC_APPID
-/*
- * Sets the fc_app_id field associted to blkcg
- * @app_id: application identifier
- * @cgrp_id: cgroup id
- * @app_id_len: size of application identifier
- */
-static inline int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len)
-{
- struct cgroup *cgrp;
- struct cgroup_subsys_state *css;
- struct blkcg *blkcg;
- int ret = 0;
-
- if (app_id_len > FC_APPID_LEN)
- return -EINVAL;
-
- cgrp = cgroup_get_from_id(cgrp_id);
- if (!cgrp)
- return -ENOENT;
- css = cgroup_get_e_css(cgrp, &io_cgrp_subsys);
- if (!css) {
- ret = -ENOENT;
- goto out_cgrp_put;
- }
- blkcg = css_to_blkcg(css);
- /*
- * There is a slight race condition on setting the appid.
- * Worst case an I/O may not find the right id.
- * This is no different from the I/O we let pass while obtaining
- * the vmid from the fabric.
- * Adding the overhead of a lock is not necessary.
- */
- strlcpy(blkcg->fc_app_id, app_id, app_id_len);
- css_put(css);
-out_cgrp_put:
- cgroup_put(cgrp);
- return ret;
-}
-
-/**
- * blkcg_get_fc_appid - get the fc app identifier associated with a bio
- * @bio: target bio
- *
- * On success return the fc_app_id, on failure return NULL
- */
-static inline char *blkcg_get_fc_appid(struct bio *bio)
+static inline struct cgroup_subsys_state *bio_blkcg_css(struct bio *bio)
{
- if (bio && bio->bi_blkg &&
- (bio->bi_blkg->blkcg->fc_app_id[0] != '\0'))
- return bio->bi_blkg->blkcg->fc_app_id;
return NULL;
}
-#else
-static inline int blkcg_set_fc_appid(char *buf, u64 id, size_t len) { return -EINVAL; }
-static inline char *blkcg_get_fc_appid(struct bio *bio) { return NULL; }
-#endif /*CONFIG_BLK_CGROUP_FC_APPID*/
+#endif /* CONFIG_BLK_CGROUP */
+
+int blkcg_set_fc_appid(char *app_id, u64 cgrp_id, size_t app_id_len);
+char *blkcg_get_fc_appid(struct bio *bio);
+
#endif /* _BLK_CGROUP_H */
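struct blkcg is now private to block/, so the remaining public entry points speak in terms of cgroup_subsys_state. A hedged sketch (not taken from this series) of keeping a bio's blkcg online across deferred work using the new css-based pin helpers:

struct cgroup_subsys_state *css = bio_blkcg_css(bio);	/* may be NULL */

if (css)
	blkcg_pin_online(css);
/* ... hand the bio to work that may outlive the cgroup going offline ... */
if (css)
	blkcg_unpin_online(css);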
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 1973ef9bd40fcf..40e8154006110a 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -246,9 +246,8 @@ typedef unsigned int blk_qc_t;
struct bio {
struct bio *bi_next; /* request queue link */
struct block_device *bi_bdev;
- unsigned int bi_opf; /* bottom bits req flags,
- * top bits REQ_OP. Use
- * accessors.
+ unsigned int bi_opf; /* bottom bits REQ_OP, top bits
+ * req_flags.
*/
unsigned short bi_flags; /* BIO_* below */
unsigned short bi_ioprio;
@@ -329,7 +328,6 @@ enum {
BIO_QOS_MERGED, /* but went through rq_qos merge path */
BIO_REMAPPED,
BIO_ZONE_WRITE_LOCKED, /* Owns a zoned device zone write lock */
- BIO_PERCPU_CACHE, /* can participate in per-cpu alloc cache */
BIO_FLAG_LAST
};
@@ -409,15 +407,17 @@ enum req_flag_bits {
* work item to avoid such priority inversions.
*/
__REQ_CGROUP_PUNT,
+ __REQ_POLLED, /* caller polls for completion using bio_poll */
+ __REQ_ALLOC_CACHE, /* allocate IO from cache if available */
+ __REQ_SWAP, /* swap I/O */
+ __REQ_DRV, /* for driver use */
- /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ /*
+ * Command specific flags, keep last:
+ */
+ /* for REQ_OP_WRITE_ZEROES: */
__REQ_NOUNMAP, /* do not free blocks when zeroing */
- __REQ_POLLED, /* caller polls for completion using bio_poll */
-
- /* for driver use */
- __REQ_DRV,
- __REQ_SWAP, /* swapping request. */
__REQ_NR_BITS, /* stops here */
};
@@ -439,6 +439,7 @@ enum req_flag_bits {
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_POLLED (1ULL << __REQ_POLLED)
+#define REQ_ALLOC_CACHE (1ULL << __REQ_ALLOC_CACHE)
#define REQ_DRV (1ULL << __REQ_DRV)
#define REQ_SWAP (1ULL << __REQ_SWAP)
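REQ_ALLOC_CACHE replaces the old BIO_PERCPU_CACHE bio flag: opting into the per-cpu bio cache is now expressed in bi_opf at allocation time, which is also why bio_clear_polled() in bio.h above drops both bits together. A hedged allocation sketch (bdev is a placeholder):

/* Sketch: request a polled, cache-backed bio at allocation time. */
bio = bio_alloc_bioset(bdev, 1, REQ_OP_READ | REQ_POLLED | REQ_ALLOC_CACHE,
		       GFP_KERNEL, &fs_bio_set);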
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 60d01613899711..34724b15813b76 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -248,6 +248,7 @@ struct queue_limits {
unsigned int io_opt;
unsigned int max_discard_sectors;
unsigned int max_hw_discard_sectors;
+ unsigned int max_secure_erase_sectors;
unsigned int max_write_zeroes_sectors;
unsigned int max_zone_append_sectors;
unsigned int discard_granularity;
@@ -540,10 +541,8 @@ struct request_queue {
#define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */
#define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
#define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */
-#define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
-#define QUEUE_FLAG_SECERASE 11 /* supports secure erase */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
#define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
@@ -582,11 +581,8 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
test_bit(QUEUE_FLAG_STABLE_WRITES, &(q)->queue_flags)
#define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
#define blk_queue_zone_resetall(q) \
test_bit(QUEUE_FLAG_ZONE_RESETALL, &(q)->queue_flags)
-#define blk_queue_secure_erase(q) \
- (test_bit(QUEUE_FLAG_SECERASE, &(q)->queue_flags))
#define blk_queue_dax(q) test_bit(QUEUE_FLAG_DAX, &(q)->queue_flags)
#define blk_queue_pci_p2pdma(q) \
test_bit(QUEUE_FLAG_PCI_P2PDMA, &(q)->queue_flags)
@@ -602,7 +598,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
REQ_FAILFAST_DRIVER))
#define blk_queue_quiesced(q) test_bit(QUEUE_FLAG_QUIESCED, &(q)->queue_flags)
#define blk_queue_pm_only(q) atomic_read(&(q)->pm_only)
-#define blk_queue_fua(q) test_bit(QUEUE_FLAG_FUA, &(q)->queue_flags)
#define blk_queue_registered(q) test_bit(QUEUE_FLAG_REGISTERED, &(q)->queue_flags)
#define blk_queue_nowait(q) test_bit(QUEUE_FLAG_NOWAIT, &(q)->queue_flags)
@@ -950,6 +945,8 @@ extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_discard_segments(struct request_queue *,
unsigned short);
+void blk_queue_max_secure_erase_sectors(struct request_queue *q,
+ unsigned int max_sectors);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
extern void blk_queue_max_discard_sectors(struct request_queue *q,
unsigned int max_discard_sectors);
@@ -1090,13 +1087,12 @@ static inline long nr_blockdev_pages(void)
extern void blk_io_schedule(void);
-#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
-
-extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, int flags,
- struct bio **biop);
+int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask);
+int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct bio **biop);
+int blkdev_issue_secure_erase(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp);
#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
@@ -1115,7 +1111,7 @@ static inline int sb_issue_discard(struct super_block *sb, sector_t block,
SECTOR_SHIFT),
nr_blocks << (sb->s_blocksize_bits -
SECTOR_SHIFT),
- gfp_mask, flags);
+ gfp_mask);
}
static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask)
@@ -1189,6 +1185,12 @@ static inline unsigned int queue_max_zone_append_sectors(const struct request_qu
return min(l->max_zone_append_sectors, l->max_sectors);
}
+static inline unsigned int
+bdev_max_zone_append_sectors(struct block_device *bdev)
+{
+ return queue_max_zone_append_sectors(bdev_get_queue(bdev));
+}
+
static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
@@ -1246,84 +1248,54 @@ bdev_zone_write_granularity(struct block_device *bdev)
return queue_zone_write_granularity(bdev_get_queue(bdev));
}
-static inline int queue_alignment_offset(const struct request_queue *q)
-{
- if (q->limits.misaligned)
- return -1;
+int bdev_alignment_offset(struct block_device *bdev);
+unsigned int bdev_discard_alignment(struct block_device *bdev);
- return q->limits.alignment_offset;
+static inline unsigned int bdev_max_discard_sectors(struct block_device *bdev)
+{
+ return bdev_get_queue(bdev)->limits.max_discard_sectors;
}
-static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
+static inline unsigned int bdev_discard_granularity(struct block_device *bdev)
{
- unsigned int granularity = max(lim->physical_block_size, lim->io_min);
- unsigned int alignment = sector_div(sector, granularity >> SECTOR_SHIFT)
- << SECTOR_SHIFT;
+ return bdev_get_queue(bdev)->limits.discard_granularity;
+}
- return (granularity + lim->alignment_offset - alignment) % granularity;
+static inline unsigned int
+bdev_max_secure_erase_sectors(struct block_device *bdev)
+{
+ return bdev_get_queue(bdev)->limits.max_secure_erase_sectors;
}
-static inline int bdev_alignment_offset(struct block_device *bdev)
+static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
- if (q->limits.misaligned)
- return -1;
- if (bdev_is_partition(bdev))
- return queue_limit_alignment_offset(&q->limits,
- bdev->bd_start_sect);
- return q->limits.alignment_offset;
+ if (q)
+ return q->limits.max_write_zeroes_sectors;
+
+ return 0;
}
-static inline int queue_discard_alignment(const struct request_queue *q)
+static inline bool bdev_nonrot(struct block_device *bdev)
{
- if (q->limits.discard_misaligned)
- return -1;
-
- return q->limits.discard_alignment;
+ return blk_queue_nonrot(bdev_get_queue(bdev));
}
-static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
+static inline bool bdev_stable_writes(struct block_device *bdev)
{
- unsigned int alignment, granularity, offset;
-
- if (!lim->max_discard_sectors)
- return 0;
-
- /* Why are these in bytes, not sectors? */
- alignment = lim->discard_alignment >> SECTOR_SHIFT;
- granularity = lim->discard_granularity >> SECTOR_SHIFT;
- if (!granularity)
- return 0;
-
- /* Offset of the partition start in 'granularity' sectors */
- offset = sector_div(sector, granularity);
-
- /* And why do we do this modulus *again* in blkdev_issue_discard()? */
- offset = (granularity + alignment - offset) % granularity;
-
- /* Turn it back into bytes, gaah */
- return offset << SECTOR_SHIFT;
+ return test_bit(QUEUE_FLAG_STABLE_WRITES,
+ &bdev_get_queue(bdev)->queue_flags);
}
-static inline int bdev_discard_alignment(struct block_device *bdev)
+static inline bool bdev_write_cache(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (bdev_is_partition(bdev))
- return queue_limit_discard_alignment(&q->limits,
- bdev->bd_start_sect);
- return q->limits.discard_alignment;
+ return test_bit(QUEUE_FLAG_WC, &bdev_get_queue(bdev)->queue_flags);
}
-static inline unsigned int bdev_write_zeroes_sectors(struct block_device *bdev)
+static inline bool bdev_fua(struct block_device *bdev)
{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return q->limits.max_write_zeroes_sectors;
-
- return 0;
+ return test_bit(QUEUE_FLAG_FUA, &bdev_get_queue(bdev)->queue_flags);
}
static inline enum blk_zoned_model bdev_zoned_model(struct block_device *bdev)
@@ -1491,9 +1463,10 @@ static inline void blk_wake_io_task(struct task_struct *waiter)
wake_up_process(waiter);
}
-unsigned long disk_start_io_acct(struct gendisk *disk, unsigned int sectors,
- unsigned int op);
-void disk_end_io_acct(struct gendisk *disk, unsigned int op,
+unsigned long bdev_start_io_acct(struct block_device *bdev,
+ unsigned int sectors, unsigned int op,
+ unsigned long start_time);
+void bdev_end_io_acct(struct block_device *bdev, unsigned int op,
unsigned long start_time);
void bio_start_io_acct_time(struct bio *bio, unsigned long start_time);
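Most of the blkdev.h churn replaces request_queue flag tests with block_device based accessors, and secure erase (formerly BLKDEV_DISCARD_SECURE) gets its own blkdev_issue_secure_erase() entry point. An illustrative probe helper (hypothetical name, not from this series) showing the new spellings:

/*
 * Illustrative only: query device properties through the new bdev
 * helpers rather than the request_queue flags they replace.
 */
static void fs_probe_bdev(struct block_device *bdev)
{
	bool rotational = !bdev_nonrot(bdev);	/* was !blk_queue_nonrot(q) */
	bool fua = bdev_fua(bdev);		/* was blk_queue_fua(q) */
	bool wc = bdev_write_cache(bdev);
	unsigned int secure = bdev_max_secure_erase_sectors(bdev);

	pr_info("rotational=%d fua=%d write_cache=%d max_secure_erase=%u\n",
		rotational, fua, wc, secure);
}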
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 22501a293fa545..623e22492afa50 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -27,12 +27,10 @@ struct blk_trace {
atomic_t dropped;
};
-struct blkcg;
-
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern __printf(3, 4)
-void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *fmt, ...);
+__printf(3, 4) void __blk_trace_note_message(struct blk_trace *bt,
+ struct cgroup_subsys_state *css, const char *fmt, ...);
/**
* blk_add_trace_msg - Add a (simple) message to the blktrace stream
@@ -47,14 +45,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
* NOTE: Can not use 'static inline' due to presence of var args...
*
**/
-#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
+#define blk_add_cgroup_trace_msg(q, css, fmt, ...) \
do { \
struct blk_trace *bt; \
\
rcu_read_lock(); \
bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
- __trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ __blk_trace_note_message(bt, css, fmt, ##__VA_ARGS__);\
rcu_read_unlock(); \
} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
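The blktrace message hook now carries a cgroup_subsys_state instead of a struct blkcg, so cgroup-aware callers pass a css directly. A hedged usage sketch (format string and variable names are illustrative):

/* Sketch: tag a blktrace message with the bio's blkcg css. */
blk_add_cgroup_trace_msg(q, bio_blkcg_css(bio),
			 "throttle bio, wait %llu ns", wait_ns);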
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index de5d75bafd6651..30e5bec81d2b62 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -222,9 +222,5 @@ void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
-static inline struct cgroup_subsys_state *kthread_blkcg(void)
-{
- return NULL;
-}
#endif
#endif /* _LINUX_KTHREAD_H */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 675f3a1fe61394..773963a1e0b536 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -14,7 +14,7 @@
#define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2
#define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4
-struct request_queue;
+struct block_device;
struct scatterlist;
struct target_backend_ops {
@@ -117,7 +117,7 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
- struct request_queue *q);
+ struct block_device *bdev);
static inline bool target_dev_configured(struct se_device *se_dev)
{