hwspinlock: Remove unused (devm_)hwspin_lock_request()
devm_hwspin_lock_request() was added by 2018's
commit 4f1acd758b ("hwspinlock: Add devm_xxx() APIs to request/free
hwlock"); however, it has never been used: everyone uses
devm_hwspin_lock_request_specific() instead.
Remove it.

Similarly, the non-devm variant is unused.
Remove it, together with the documentation that refers to it.
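
For reference, a minimal sketch of the specific-request pattern that
callers use instead. The probe function name, the DT index 0 and the
-EBUSY error code are illustrative assumptions; only
of_hwspin_lock_get_id() and devm_hwspin_lock_request_specific() come
from the API touched below.

  #include <linux/hwspinlock.h>
  #include <linux/of.h>
  #include <linux/platform_device.h>

  /* Illustrative probe: request a specific hwspinlock via the managed API. */
  static int example_probe(struct platform_device *pdev)
  {
        struct hwspinlock *hwlock;
        int id;

        /* resolve the lock index from the node's "hwlocks" phandle (index 0 assumed) */
        id = of_hwspin_lock_get_id(pdev->dev.of_node, 0);
        if (id < 0)
                return id;

        /* managed request: the lock is released automatically on unbind */
        hwlock = devm_hwspin_lock_request_specific(&pdev->dev, id);
        if (!hwlock)
                return -EBUSY;

        /* hwlock can now be used with hwspin_lock_timeout()/hwspin_unlock() */
        return 0;
  }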
Signed-off-by: Dr. David Alan Gilbert <linux@treblig.org>
Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
Link: https://lore.kernel.org/r/20241027205445.239108-1-linux@treblig.org
Signed-off-by: Bjorn Andersson <andersson@kernel.org>
Committed by: Bjorn Andersson
Parent: a64dcfb451
Commit: e9a3682d17
@@ -38,17 +38,6 @@ independent, drivers.
 User API
 ========
 
-::
-
-  struct hwspinlock *hwspin_lock_request(void);
-
-Dynamically assign an hwspinlock and return its address, or NULL
-in case an unused hwspinlock isn't available. Users of this
-API will usually want to communicate the lock's id to the remote core
-before it can be used to achieve synchronization.
-
-Should be called from a process context (might sleep).
-
 ::
 
   struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
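
With the dynamic-request documentation gone, a minimal sketch of the
surviving request-specific flow described above. The helper name
example_get_lock and the agreed_id parameter are assumptions for
illustration; hwspin_lock_request_specific() and hwspin_lock_get_id()
are the documented calls.

  #include <linux/hwspinlock.h>
  #include <linux/printk.h>

  /* Illustrative: request a pre-agreed lock id instead of a dynamic one. */
  static struct hwspinlock *example_get_lock(unsigned int agreed_id)
  {
        struct hwspinlock *hwlock;

        hwlock = hwspin_lock_request_specific(agreed_id);
        if (!hwlock)
                return NULL;

        /* both sides already know the id, but it can be read back if needed */
        pr_info("using hwspinlock %d\n", hwspin_lock_get_id(hwlock));

        return hwlock;
  }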
@@ -331,40 +320,7 @@ Typical usage
   #include <linux/hwspinlock.h>
   #include <linux/err.h>
 
-  int hwspinlock_example1(void)
-  {
-        struct hwspinlock *hwlock;
-        int ret;
-
-        /* dynamically assign a hwspinlock */
-        hwlock = hwspin_lock_request();
-        if (!hwlock)
-                ...
-
-        id = hwspin_lock_get_id(hwlock);
-        /* probably need to communicate id to a remote processor now */
-
-        /* take the lock, spin for 1 sec if it's already taken */
-        ret = hwspin_lock_timeout(hwlock, 1000);
-        if (ret)
-                ...
-
-        /*
-         * we took the lock, do our thing now, but do NOT sleep
-         */
-
-        /* release the lock */
-        hwspin_unlock(hwlock);
-
-        /* free the lock */
-        ret = hwspin_lock_free(hwlock);
-        if (ret)
-                ...
-
-        return ret;
-  }
-
-  int hwspinlock_example2(void)
+  int hwspinlock_example(void)
   {
         struct hwspinlock *hwlock;
         int ret;
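
The removed example1 relied on dynamic assignment; below is a sketch of
the equivalent flow using a specific lock, mirroring the example that
remains in the documentation. PREDEFINED_LOCK_ID and the function name
example_take_and_release are assumptions, not taken from the tree.

  #include <linux/hwspinlock.h>

  #define PREDEFINED_LOCK_ID 3    /* assumed: id agreed upon with the remote core */

  static int example_take_and_release(void)
  {
        struct hwspinlock *hwlock;
        int ret;

        /* request the agreed-upon lock instead of a dynamically assigned one */
        hwlock = hwspin_lock_request_specific(PREDEFINED_LOCK_ID);
        if (!hwlock)
                return -EBUSY;

        /* take the lock, spin for up to 1 sec if it's already taken */
        ret = hwspin_lock_timeout(hwlock, 1000);
        if (ret)
                goto free;

        /* we own the lock: do the protected work, but do NOT sleep here */

        /* release the lock */
        hwspin_unlock(hwlock);

  free:
        /* hand the lock back to the framework */
        hwspin_lock_free(hwlock);
        return ret;
  }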
@@ -726,49 +726,6 @@ int hwspin_lock_get_id(struct hwspinlock *hwlock)
 }
 EXPORT_SYMBOL_GPL(hwspin_lock_get_id);
 
-/**
- * hwspin_lock_request() - request an hwspinlock
- *
- * This function should be called by users of the hwspinlock device,
- * in order to dynamically assign them an unused hwspinlock.
- * Usually the user of this lock will then have to communicate the lock's id
- * to the remote core before it can be used for synchronization (to get the
- * id of a given hwlock, use hwspin_lock_get_id()).
- *
- * Should be called from a process context (might sleep)
- *
- * Returns: the address of the assigned hwspinlock, or %NULL on error
- */
-struct hwspinlock *hwspin_lock_request(void)
-{
-        struct hwspinlock *hwlock;
-        int ret;
-
-        mutex_lock(&hwspinlock_tree_lock);
-
-        /* look for an unused lock */
-        ret = radix_tree_gang_lookup_tag(&hwspinlock_tree, (void **)&hwlock,
-                                         0, 1, HWSPINLOCK_UNUSED);
-        if (ret == 0) {
-                pr_warn("a free hwspinlock is not available\n");
-                hwlock = NULL;
-                goto out;
-        }
-
-        /* sanity check that should never fail */
-        WARN_ON(ret > 1);
-
-        /* mark as used and power up */
-        ret = __hwspin_lock_request(hwlock);
-        if (ret < 0)
-                hwlock = NULL;
-
-out:
-        mutex_unlock(&hwspinlock_tree_lock);
-        return hwlock;
-}
-EXPORT_SYMBOL_GPL(hwspin_lock_request);
-
 /**
  * hwspin_lock_request_specific() - request for a specific hwspinlock
  * @id: index of the specific hwspinlock that is requested
@@ -912,40 +869,6 @@ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
 }
 EXPORT_SYMBOL_GPL(devm_hwspin_lock_free);
 
-/**
- * devm_hwspin_lock_request() - request an hwspinlock for a managed device
- * @dev: the device to request an hwspinlock
- *
- * This function should be called by users of the hwspinlock device,
- * in order to dynamically assign them an unused hwspinlock.
- * Usually the user of this lock will then have to communicate the lock's id
- * to the remote core before it can be used for synchronization (to get the
- * id of a given hwlock, use hwspin_lock_get_id()).
- *
- * Should be called from a process context (might sleep)
- *
- * Returns: the address of the assigned hwspinlock, or %NULL on error
- */
-struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
-{
-        struct hwspinlock **ptr, *hwlock;
-
-        ptr = devres_alloc(devm_hwspin_lock_release, sizeof(*ptr), GFP_KERNEL);
-        if (!ptr)
-                return NULL;
-
-        hwlock = hwspin_lock_request();
-        if (hwlock) {
-                *ptr = hwlock;
-                devres_add(dev, ptr);
-        } else {
-                devres_free(ptr);
-        }
-
-        return hwlock;
-}
-EXPORT_SYMBOL_GPL(devm_hwspin_lock_request);
-
 /**
  * devm_hwspin_lock_request_specific() - request for a specific hwspinlock for
  *                                       a managed device
@@ -58,7 +58,6 @@ struct hwspinlock_pdata {
 int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
                 const struct hwspinlock_ops *ops, int base_id, int num_locks);
 int hwspin_lock_unregister(struct hwspinlock_device *bank);
-struct hwspinlock *hwspin_lock_request(void);
 struct hwspinlock *hwspin_lock_request_specific(unsigned int id);
 int hwspin_lock_free(struct hwspinlock *hwlock);
 int of_hwspin_lock_get_id(struct device_node *np, int index);
@@ -70,7 +69,6 @@ void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
 int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
 int hwspin_lock_bust(struct hwspinlock *hwlock, unsigned int id);
 int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
-struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
 struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                      unsigned int id);
 int devm_hwspin_lock_unregister(struct device *dev,
@@ -95,11 +93,6 @@ int devm_hwspin_lock_register(struct device *dev,
  * Note: ERR_PTR(-ENODEV) will still be considered a success for NULL-checking
  * users. Others, which care, can still check this with IS_ERR.
  */
-static inline struct hwspinlock *hwspin_lock_request(void)
-{
-        return ERR_PTR(-ENODEV);
-}
-
 static inline struct hwspinlock *hwspin_lock_request_specific(unsigned int id)
 {
         return ERR_PTR(-ENODEV);
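
The note above about the !CONFIG_HWSPINLOCK stubs can be illustrated
with a short sketch: NULL-checking callers sail through, while callers
that care can detect the stub with IS_ERR(). LOCK_ID and the function
name example_stub_checks are assumptions for illustration only.

  #include <linux/err.h>
  #include <linux/hwspinlock.h>
  #include <linux/printk.h>

  #define LOCK_ID 0    /* assumed lock id, for illustration only */

  static void example_stub_checks(void)
  {
        struct hwspinlock *hwlock = hwspin_lock_request_specific(LOCK_ID);

        /* NULL-checking users treat the ERR_PTR(-ENODEV) stub as "success" */
        if (!hwlock)
                return;

        /* users that care can detect the stubbed-out configuration */
        if (IS_ERR(hwlock))
                pr_info("hwspinlock support not built in\n");
  }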
@@ -155,11 +148,6 @@ int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
         return 0;
 }
 
-static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
-{
-        return ERR_PTR(-ENODEV);
-}
-
 static inline
 struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
                                                      unsigned int id)