author     Thierry Reding <treding@nvidia.com>    2020-02-07 16:50:52 +0100
committer  Thierry Reding <treding@nvidia.com>    2021-12-16 14:07:06 +0100
commit     1f39b1dfa53c84b56d7ad37fed44afda7004959d (patch)
tree       8ef62c2b9b69fbf96e0cdad49362f2c4c17e0682 /include
parent     c6aeaf56f468a565f6d2f27325fc07d35cdcd3cb (diff)
drm/tegra: Implement buffer object cache
This cache is used to avoid mapping and unmapping buffer objects
unnecessarily. Mappings are cached per client and stay hot until
the buffer object is destroyed.
Signed-off-by: Thierry Reding <treding@nvidia.com>
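To make the caching idea concrete, the lookup it enables could look roughly like the sketch below. This is not the code added by this commit (the hunk here only touches include/linux/host1x.h); the helper name sketch_cached_pin and the placement of kref_init() are assumptions for illustration, while struct host1x_bo_cache and the ref/entry/cache/bo/dev/direction members of struct host1x_bo_mapping are the ones declared in the diff further down.

#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* Illustrative only: reuse a cached mapping for (bo, dev, dir) if one exists. */
static struct host1x_bo_mapping *
sketch_cached_pin(struct device *dev, struct host1x_bo *bo,
                  enum dma_data_direction dir, struct host1x_bo_cache *cache)
{
        struct host1x_bo_mapping *map;

        mutex_lock(&cache->lock);

        list_for_each_entry(map, &cache->mappings, entry) {
                if (map->bo == bo && map->dev == dev && map->direction == dir) {
                        /* cache hit: take another reference, no remapping */
                        kref_get(&map->ref);
                        mutex_unlock(&cache->lock);
                        return map;
                }
        }

        mutex_unlock(&cache->lock);

        /* cache miss: create the mapping and remember which cache owns it */
        map = bo->ops->pin(dev, bo, dir);
        if (!IS_ERR(map)) {
                kref_init(&map->ref); /* assumes ->pin() leaves the refcount to the caller */
                map->cache = cache;

                mutex_lock(&cache->lock);
                list_add_tail(&map->entry, &cache->mappings);
                mutex_unlock(&cache->lock);
        }

        return map;
}

On a hit the existing mapping is simply re-referenced instead of being re-created, which is what keeps mappings hot for the lifetime of the buffer object.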
Diffstat (limited to 'include')
-rw-r--r-- | include/linux/host1x.h | 53
1 file changed, 43 insertions, 10 deletions
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 157326074df8..d0bb16cdd005 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -8,6 +8,7 @@
 
 #include <linux/device.h>
 #include <linux/dma-direction.h>
+#include <linux/spinlock.h>
 #include <linux/types.h>
 
 enum host1x_class {
@@ -25,6 +26,28 @@ struct iommu_group;
 
 u64 host1x_get_dma_mask(struct host1x *host1x);
 
 /**
+ * struct host1x_bo_cache - host1x buffer object cache
+ * @mappings: list of mappings
+ * @lock: synchronizes accesses to the list of mappings
+ */
+struct host1x_bo_cache {
+        struct list_head mappings;
+        struct mutex lock;
+};
+
+static inline void host1x_bo_cache_init(struct host1x_bo_cache *cache)
+{
+        INIT_LIST_HEAD(&cache->mappings);
+        mutex_init(&cache->lock);
+}
+
+static inline void host1x_bo_cache_destroy(struct host1x_bo_cache *cache)
+{
+        /* XXX warn if not empty? */
+        mutex_destroy(&cache->lock);
+}
+
+/**
  * struct host1x_client_ops - host1x client operations
  * @early_init: host1x client early initialization code
  * @init: host1x client initialization code
@@ -74,6 +97,8 @@ struct host1x_client {
         struct host1x_client *parent;
         unsigned int usecount;
         struct mutex lock;
+
+        struct host1x_bo_cache cache;
 };
 
 /*
@@ -84,16 +109,26 @@ struct host1x_bo;
 struct sg_table;
 
 struct host1x_bo_mapping {
+        struct kref ref;
         struct dma_buf_attachment *attach;
         enum dma_data_direction direction;
+        struct list_head list;
         struct host1x_bo *bo;
         struct sg_table *sgt;
         unsigned int chunks;
         struct device *dev;
         dma_addr_t phys;
         size_t size;
+
+        struct host1x_bo_cache *cache;
+        struct list_head entry;
 };
 
+static inline struct host1x_bo_mapping *to_host1x_bo_mapping(struct kref *ref)
+{
+        return container_of(ref, struct host1x_bo_mapping, ref);
+}
+
 struct host1x_bo_ops {
         struct host1x_bo *(*get)(struct host1x_bo *bo);
         void (*put)(struct host1x_bo *bo);
@@ -106,11 +141,15 @@ struct host1x_bo_ops {
 
 struct host1x_bo {
         const struct host1x_bo_ops *ops;
+        struct list_head mappings;
+        spinlock_t lock;
 };
 
 static inline void host1x_bo_init(struct host1x_bo *bo,
                                   const struct host1x_bo_ops *ops)
 {
+        INIT_LIST_HEAD(&bo->mappings);
+        spin_lock_init(&bo->lock);
         bo->ops = ops;
 }
 
@@ -124,16 +163,10 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
         bo->ops->put(bo);
 }
 
-static inline struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
-                                                      enum dma_data_direction dir)
-{
-        return bo->ops->pin(dev, bo, dir);
-}
-
-static inline void host1x_bo_unpin(struct host1x_bo_mapping *map)
-{
-        map->bo->ops->unpin(map);
-}
+struct host1x_bo_mapping *host1x_bo_pin(struct device *dev, struct host1x_bo *bo,
+                                        enum dma_data_direction dir,
+                                        struct host1x_bo_cache *cache);
+void host1x_bo_unpin(struct host1x_bo_mapping *map);
 
 static inline void *host1x_bo_mmap(struct host1x_bo *bo)
 {
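For reference, a minimal usage sketch against the declarations above, from the point of view of a host1x client driver. The helper names and the DMA_TO_DEVICE direction are hypothetical; the host1x_bo_pin()/host1x_bo_unpin() prototypes, the client->cache member and the host1x_bo_cache_init()/host1x_bo_cache_destroy() helpers are taken from this header change.

#include <linux/dma-direction.h>
#include <linux/err.h>
#include <linux/host1x.h>

/* Hypothetical helper: map a BO for DMA through the client's mapping cache. */
static int example_client_map(struct host1x_client *client, struct host1x_bo *bo,
                              dma_addr_t *iova, struct host1x_bo_mapping **mapping)
{
        struct host1x_bo_mapping *map;

        /* repeated pins of the same BO for the same device can now hit the cache */
        map = host1x_bo_pin(client->dev, bo, DMA_TO_DEVICE, &client->cache);
        if (IS_ERR(map))
                return PTR_ERR(map);

        *iova = map->phys;
        *mapping = map;
        return 0;
}

static void example_client_unmap(struct host1x_bo_mapping *map)
{
        /*
         * Drops the reference taken by host1x_bo_pin(); per the commit
         * message the mapping itself stays cached until the BO is destroyed.
         */
        host1x_bo_unpin(map);
}

A cache that is not embedded in a struct host1x_client can be managed the same way: initialize it with host1x_bo_cache_init() before the first pin and tear it down with host1x_bo_cache_destroy() once all mappings are gone.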