Snap for 10753213 from 58bccfa263 to android14-6.1-keystone-qcom-release

Change-Id: I041e979cf55b7270a94091d0d30d6b8eaa990d06
@@ -90370,10 +90370,9 @@ member {
offset: 712
}
member {
id: 0xf667d80f
id: 0xf667dcee
name: "fullness_list"
type_id: 0xb8bf135c
offset: 64
}
member {
id: 0xfeb50ea0
@@ -101850,12 +101849,6 @@ member {
type_id: 0x4585663f
offset: 320
}
member {
id: 0xad7c8a98
name: "index"
type_id: 0x4585663f
offset: 672
}
member {
id: 0xad7c8ba4
name: "index"
@@ -101868,6 +101861,12 @@ member {
type_id: 0x4585663f
offset: 480
}
member {
id: 0xad7c8d2b
name: "index"
type_id: 0x4585663f
offset: 608
}
member {
id: 0xad7c8d72
name: "index"
@@ -113342,6 +113341,12 @@ member {
type_id: 0xf313e71a
offset: 768
}
member {
id: 0x2d1fe43b
name: "lock"
type_id: 0xf313e71a
offset: 17536
}
member {
id: 0x2d1fe44c
name: "lock"
@@ -121005,12 +121010,6 @@ member {
type_id: 0x2c8b0a9f
offset: 768
}
member {
id: 0xdb33fcdf
name: "migrate_lock"
type_id: 0xf4933b90
offset: 17536
}
member {
id: 0x8edaa968
name: "migrate_page"
@@ -133786,10 +133785,10 @@ member {
type_id: 0xad7c0a89
}
member {
id: 0x7a226550
id: 0x7a226b7d
name: "objs_per_zspage"
type_id: 0x6720d32f
offset: 608
offset: 544
}
member {
id: 0x33953b25
@@ -138582,10 +138581,10 @@ member {
bitsize: 1
}
member {
id: 0x338646f2
id: 0x338649f9
name: "pages_per_zspage"
type_id: 0x6720d32f
offset: 640
offset: 576
}
member {
id: 0xf9521fd2
@@ -170127,18 +170126,18 @@ member {
type_id: 0x6720d32f
offset: 96
}
member {
id: 0xd91935d3
name: "size"
type_id: 0x6720d32f
offset: 576
}
member {
id: 0xd9193607
name: "size"
type_id: 0x6720d32f
offset: 896
}
member {
id: 0xd91937b9
name: "size"
type_id: 0x6720d32f
offset: 512
}
member {
id: 0xd9193b66
name: "size"
@@ -175840,10 +175839,10 @@ member {
offset: 896
}
member {
id: 0xb91e0d04
id: 0xb91e0940
name: "stats"
type_id: 0x6b61371d
offset: 704
offset: 640
}
member {
id: 0xb920e0d3
@@ -243356,14 +243355,13 @@ struct_union {
kind: STRUCT
name: "size_class"
definition {
bytesize: 136
member_id: 0x2d1fec85
member_id: 0xf667d80f
member_id: 0xd91935d3
member_id: 0x7a226550
member_id: 0x338646f2
member_id: 0xad7c8a98
member_id: 0xb91e0d04
bytesize: 128
member_id: 0xf667dcee
member_id: 0xd91937b9
member_id: 0x7a226b7d
member_id: 0x338649f9
member_id: 0xad7c8d2b
member_id: 0xb91e0940
}
}
struct_union {
@@ -259130,7 +259128,7 @@ struct_union {
member_id: 0xb9089225
member_id: 0x868caa9e
member_id: 0x8a67a9e5
member_id: 0xdb33fcdf
member_id: 0x2d1fe43b
}
}
struct_union {
@@ -66,18 +66,36 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
{
struct dma_fence_array *result;
struct dma_fence *tmp, **array;
ktime_t timestamp;
unsigned int i;
size_t count;

count = 0;
timestamp = ns_to_ktime(0);
for (i = 0; i < num_fences; ++i) {
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i])
if (!dma_fence_is_signaled(tmp))
dma_fence_unwrap_for_each(tmp, &iter[i], fences[i]) {
if (!dma_fence_is_signaled(tmp)) {
++count;
} else if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT,
&tmp->flags)) {
if (ktime_after(tmp->timestamp, timestamp))
timestamp = tmp->timestamp;
} else {
/*
* Use the current time if the fence is
* currently signaling.
*/
timestamp = ktime_get();
}
}
}

/*
* If we couldn't find a pending fence just return a private signaled
* fence with the timestamp of the last signaled one.
*/
if (count == 0)
return dma_fence_get_stub();
return dma_fence_allocate_private_stub(timestamp);

array = kmalloc_array(count, sizeof(*array), GFP_KERNEL);
if (!array)
@@ -138,7 +156,7 @@ struct dma_fence *__dma_fence_unwrap_merge(unsigned int num_fences,
} while (tmp);

if (count == 0) {
tmp = dma_fence_get_stub();
tmp = dma_fence_allocate_private_stub(ktime_get());
goto return_tmp;
}
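Net effect of the two hunks above: when every input fence is already signaled, __dma_fence_unwrap_merge() no longer hands back the shared global stub; it allocates a private signaled stub carrying the newest signal time seen among the inputs (or the current time if a fence is still mid-signaling). A caller-side sketch, not part of this commit; the fences a and b are hypothetical and assumed to be already signaled:

#include <linux/dma-fence-unwrap.h>

static int merge_signaled_example(struct dma_fence *a, struct dma_fence *b)
{
	struct dma_fence *merged;

	merged = dma_fence_unwrap_merge(a, b);
	if (!merged)
		return -ENOMEM;	/* stub or array allocation failed */

	/* The result is already signaled; its timestamp is the newest of a/b. */
	WARN_ON(!dma_fence_is_signaled(merged));

	dma_fence_put(merged);
	return 0;
}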
@@ -150,16 +150,17 @@ EXPORT_SYMBOL(dma_fence_get_stub);

/**
* dma_fence_allocate_private_stub - return a private, signaled fence
* @timestamp: timestamp when the fence was signaled
*
* Return a newly allocated and signaled stub fence.
*/
struct dma_fence *dma_fence_allocate_private_stub(void)
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp)
{
struct dma_fence *fence;

fence = kzalloc(sizeof(*fence), GFP_KERNEL);
if (fence == NULL)
return ERR_PTR(-ENOMEM);
return NULL;

dma_fence_init(fence,
&dma_fence_stub_ops,
@@ -169,7 +170,7 @@ struct dma_fence *dma_fence_allocate_private_stub(void)
set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
&fence->flags);

dma_fence_signal(fence);
dma_fence_signal_timestamp(fence, timestamp);

return fence;
}
@@ -353,10 +353,10 @@ EXPORT_SYMBOL(drm_syncobj_replace_fence);
*/
static int drm_syncobj_assign_null_handle(struct drm_syncobj *syncobj)
{
struct dma_fence *fence = dma_fence_allocate_private_stub();
struct dma_fence *fence = dma_fence_allocate_private_stub(ktime_get());

if (IS_ERR(fence))
return PTR_ERR(fence);
if (!fence)
return -ENOMEM;

drm_syncobj_replace_fence(syncobj, fence);
dma_fence_put(fence);
@@ -755,10 +755,14 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)

/* normal [4.15.1.2] or error [4.15.1.1] completion */
if (likely ((status & (STS_INT|STS_ERR)) != 0)) {
if (likely ((status & STS_ERR) == 0))
if (likely ((status & STS_ERR) == 0)) {
INCR(ehci->stats.normal);
else
} else {
/* Force to check port status */
if (ehci->has_fsl_port_bug)
status |= STS_PCD;
INCR(ehci->stats.error);
}
bh = 1;
}
@@ -674,7 +674,8 @@ ehci_hub_status_data (struct usb_hcd *hcd, char *buf)

if ((temp & mask) != 0 || test_bit(i, &ehci->port_c_suspend)
|| (ehci->reset_done[i] && time_after_eq(
jiffies, ehci->reset_done[i]))) {
jiffies, ehci->reset_done[i]))
|| ehci_has_ci_pec_bug(ehci, temp)) {
if (i < 7)
buf [0] |= 1 << (i + 1);
else
@@ -875,6 +876,13 @@ int ehci_hub_control(
if (temp & PORT_PEC)
status |= USB_PORT_STAT_C_ENABLE << 16;

if (ehci_has_ci_pec_bug(ehci, temp)) {
status |= USB_PORT_STAT_C_ENABLE << 16;
ehci_info(ehci,
"PE is cleared by HW port:%d PORTSC:%08x\n",
wIndex + 1, temp);
}

if ((temp & PORT_OCC) && (!ignore_oc && !ehci->spurious_oc)){
status |= USB_PORT_STAT_C_OVERCURRENT << 16;
@@ -490,13 +490,14 @@ static int tt_no_collision(
static void enable_periodic(struct ehci_hcd *ehci)
{
if (ehci->periodic_count++)
return;
goto out;

/* Stop waiting to turn off the periodic schedule */
ehci->enabled_hrtimer_events &= ~BIT(EHCI_HRTIMER_DISABLE_PERIODIC);

/* Don't start the schedule until PSS is 0 */
ehci_poll_PSS(ehci);
out:
turn_on_io_watchdog(ehci);
}
@@ -707,6 +707,15 @@ ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
*/
#define ehci_has_fsl_susp_errata(e) ((e)->has_fsl_susp_errata)

/*
* Some Freescale/NXP processors using ChipIdea IP have a bug in which
* disabling the port (PE is cleared) does not cause PEC to be asserted
* when frame babble is detected.
*/
#define ehci_has_ci_pec_bug(e, portsc) \
((e)->has_fsl_port_bug && ((e)->command & CMD_PSE) \
&& !(portsc & PORT_PEC) && !(portsc & PORT_PE))

/*
* While most USB host controllers implement their registers in
* little-endian format, a minority (celleb companion chip) implement
@@ -584,7 +584,7 @@ static inline signed long dma_fence_wait(struct dma_fence *fence, bool intr)
}

struct dma_fence *dma_fence_get_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(void);
struct dma_fence *dma_fence_allocate_private_stub(ktime_t timestamp);
u64 dma_fence_context_alloc(unsigned num);

extern const struct dma_fence_ops dma_fence_array_ops;
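The header change spells out the new contract in one line: callers pass the timestamp the stub should carry, and the helper now returns NULL on allocation failure instead of an ERR_PTR(). A minimal migration sketch for such a caller, mirroring the drm_syncobj hunk above (the surrounding error-handling context is hypothetical):

/* Before: stub = dma_fence_allocate_private_stub(); checked with IS_ERR(). */
struct dma_fence *stub = dma_fence_allocate_private_stub(ktime_get());

if (!stub)
	return -ENOMEM;	/* kzalloc() of the stub fence failed */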
mm/zsmalloc.c (101 changed lines)
@@ -33,8 +33,7 @@
/*
* lock ordering:
* page_lock
* pool->migrate_lock
* class->lock
* pool->lock
* zspage->lock
*/
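The rest of the diff applies this new ordering mechanically: the old pool->migrate_lock/class->lock pair collapses into the single pool->lock, while per-zspage access is still delegated to the zspage lock where holding pool->lock for the whole operation would be too coarse. A condensed sketch of the resulting ordering on a zs_map_object()-style path (declarations and error handling omitted; pool, page and zspage are assumed to be in scope):

	spin_lock(&pool->lock);		/* pin handle -> zspage against migration/free */
	zspage = get_zspage(page);
	migrate_read_lock(zspage);	/* keep migration out of this zspage */
	spin_unlock(&pool->lock);	/* drop the coarse pool lock early */

	/* ... map and access the object under the zspage lock ... */

	migrate_read_unlock(zspage);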
@@ -192,7 +191,6 @@ static const int fullness_threshold_frac = 4;
static size_t huge_class_size;

struct size_class {
spinlock_t lock;
struct list_head fullness_list[NR_ZS_FULLNESS];
/*
* Size of objects stored in this class. Must be multiple
@@ -247,8 +245,7 @@ struct zs_pool {
#ifdef CONFIG_COMPACTION
struct work_struct free_work;
#endif
/* protect page/zspage migration */
rwlock_t migrate_lock;
spinlock_t lock;
};

struct zspage {
@@ -355,7 +352,7 @@ static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
kmem_cache_free(pool->zspage_cachep, zspage);
}

/* class->lock(which owns the handle) synchronizes races */
/* pool->lock(which owns the handle) synchronizes races */
static void record_obj(unsigned long handle, unsigned long obj)
{
*(unsigned long *)handle = obj;
@@ -452,7 +449,7 @@ static __maybe_unused int is_first_page(struct page *page)
return PagePrivate(page);
}

/* Protected by class->lock */
/* Protected by pool->lock */
static inline int get_zspage_inuse(struct zspage *zspage)
{
return zspage->inuse;
@@ -597,13 +594,13 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
if (class->index != i)
continue;

spin_lock(&class->lock);
spin_lock(&pool->lock);
class_almost_full = zs_stat_get(class, CLASS_ALMOST_FULL);
class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
obj_used = zs_stat_get(class, OBJ_USED);
freeable = zs_can_compact(class);
spin_unlock(&class->lock);
spin_unlock(&pool->lock);

objs_per_zspage = class->objs_per_zspage;
pages_used = obj_allocated / objs_per_zspage *
@@ -916,7 +913,7 @@ static void __free_zspage(struct zs_pool *pool, struct size_class *class,

get_zspage_mapping(zspage, &class_idx, &fg);

assert_spin_locked(&class->lock);
assert_spin_locked(&pool->lock);

VM_BUG_ON(get_zspage_inuse(zspage));
VM_BUG_ON(fg != ZS_EMPTY);
@@ -1247,19 +1244,19 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
BUG_ON(in_interrupt());

/* It guarantees it can get zspage from handle safely */
read_lock(&pool->migrate_lock);
spin_lock(&pool->lock);
obj = handle_to_obj(handle);
obj_to_location(obj, &page, &obj_idx);
zspage = get_zspage(page);

/*
* migration cannot move any zpages in this zspage. Here, class->lock
* migration cannot move any zpages in this zspage. Here, pool->lock
* is too heavy since callers would take some time until they calls
* zs_unmap_object API so delegate the locking from class to zspage
* which is smaller granularity.
*/
migrate_read_lock(zspage);
read_unlock(&pool->migrate_lock);
spin_unlock(&pool->lock);

class = zspage_class(pool, zspage);
off = (class->size * obj_idx) & ~PAGE_MASK;
@@ -1412,8 +1409,8 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
size += ZS_HANDLE_SIZE;
class = pool->size_class[get_size_class_index(size)];

/* class->lock effectively protects the zpage migration */
spin_lock(&class->lock);
/* pool->lock effectively protects the zpage migration */
spin_lock(&pool->lock);
zspage = find_get_zspage(class);
if (likely(zspage)) {
obj = obj_malloc(pool, zspage, handle);
@@ -1421,12 +1418,12 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
fix_fullness_group(class, zspage);
record_obj(handle, obj);
class_stat_inc(class, OBJ_USED, 1);
spin_unlock(&class->lock);
spin_unlock(&pool->lock);

return handle;
}

spin_unlock(&class->lock);
spin_unlock(&pool->lock);

zspage = alloc_zspage(pool, class, gfp);
if (!zspage) {
@@ -1434,7 +1431,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
return (unsigned long)ERR_PTR(-ENOMEM);
}

spin_lock(&class->lock);
spin_lock(&pool->lock);
obj = obj_malloc(pool, zspage, handle);
newfg = get_fullness_group(class, zspage);
insert_zspage(class, zspage, newfg);
@@ -1447,7 +1444,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)

/* We completely set up zspage so mark them as movable */
SetZsPageMovable(pool, zspage);
spin_unlock(&class->lock);
spin_unlock(&pool->lock);

return handle;
}
@@ -1491,16 +1488,14 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
return;

/*
* The pool->migrate_lock protects the race with zpage's migration
* The pool->lock protects the race with zpage's migration
* so it's safe to get the page from handle.
*/
read_lock(&pool->migrate_lock);
spin_lock(&pool->lock);
obj = handle_to_obj(handle);
obj_to_page(obj, &f_page);
zspage = get_zspage(f_page);
class = zspage_class(pool, zspage);
spin_lock(&class->lock);
read_unlock(&pool->migrate_lock);

obj_free(class->size, obj);
class_stat_dec(class, OBJ_USED, 1);
@@ -1510,7 +1505,7 @@ void zs_free(struct zs_pool *pool, unsigned long handle)

free_zspage(pool, class, zspage);
out:
spin_unlock(&class->lock);
spin_unlock(&pool->lock);
cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
@@ -1821,6 +1816,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,

static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
{
struct zs_pool *pool;
struct zspage *zspage;

/*
@@ -1831,9 +1827,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
VM_BUG_ON_PAGE(PageIsolated(page), page);

zspage = get_zspage(page);
migrate_write_lock(zspage);
pool = zspage->pool;
spin_lock(&pool->lock);
inc_zspage_isolation(zspage);
migrate_write_unlock(zspage);
spin_unlock(&pool->lock);

return true;
}
@@ -1867,16 +1864,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
pool = zspage->pool;

/*
* The pool migrate_lock protects the race between zpage migration
* The pool's lock protects the race between zpage migration
* and zs_free.
*/
write_lock(&pool->migrate_lock);
spin_lock(&pool->lock);
class = zspage_class(pool, zspage);

/*
* the class lock protects zpage alloc/free in the zspage.
*/
spin_lock(&class->lock);
/* the migrate_write_lock protects zpage access via zs_map_object */
migrate_write_lock(zspage);

@@ -1904,13 +1897,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
kunmap_atomic(s_addr);

replace_sub_page(class, zspage, newpage, page);
dec_zspage_isolation(zspage);
/*
* Since we complete the data copy and set up new zspage structure,
* it's okay to release migration_lock.
* it's okay to release the pool's lock.
*/
write_unlock(&pool->migrate_lock);
spin_unlock(&class->lock);
dec_zspage_isolation(zspage);
spin_unlock(&pool->lock);
migrate_write_unlock(zspage);

get_page(newpage);
@@ -1928,14 +1920,16 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
static void zs_page_putback(struct page *page)
{
struct zspage *zspage;
struct zs_pool *pool;

VM_BUG_ON_PAGE(!PageMovable(page), page);
VM_BUG_ON_PAGE(!PageIsolated(page), page);

zspage = get_zspage(page);
migrate_write_lock(zspage);
pool = zspage->pool;
spin_lock(&pool->lock);
dec_zspage_isolation(zspage);
migrate_write_unlock(zspage);
spin_unlock(&pool->lock);
}

static const struct movable_operations zsmalloc_mops = {
@@ -1964,9 +1958,9 @@ static void async_free_zspage(struct work_struct *work)
if (class->index != i)
continue;

spin_lock(&class->lock);
spin_lock(&pool->lock);
list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
spin_unlock(&class->lock);
spin_unlock(&pool->lock);
}

list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
@@ -1976,9 +1970,9 @@ static void async_free_zspage(struct work_struct *work)
get_zspage_mapping(zspage, &class_idx, &fullness);
VM_BUG_ON(fullness != ZS_EMPTY);
class = pool->size_class[class_idx];
spin_lock(&class->lock);
spin_lock(&pool->lock);
__free_zspage(pool, class, zspage);
spin_unlock(&class->lock);
spin_unlock(&pool->lock);
}
};

@@ -2039,10 +2033,11 @@ static unsigned long __zs_compact(struct zs_pool *pool,
struct zspage *dst_zspage = NULL;
unsigned long pages_freed = 0;

/* protect the race between zpage migration and zs_free */
write_lock(&pool->migrate_lock);
/* protect zpage allocation/free */
spin_lock(&class->lock);
/*
* protect the race between zpage migration and zs_free
* as well as zpage allocation/free
*/
spin_lock(&pool->lock);
while ((src_zspage = isolate_zspage(class, true))) {
/* protect someone accessing the zspage(i.e., zs_map_object) */
migrate_write_lock(src_zspage);
@@ -2067,7 +2062,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
putback_zspage(class, dst_zspage);
migrate_write_unlock(dst_zspage);
dst_zspage = NULL;
if (rwlock_is_contended(&pool->migrate_lock))
if (spin_is_contended(&pool->lock))
break;
}

@@ -2084,11 +2079,9 @@ static unsigned long __zs_compact(struct zs_pool *pool,
pages_freed += class->pages_per_zspage;
} else
migrate_write_unlock(src_zspage);
spin_unlock(&class->lock);
write_unlock(&pool->migrate_lock);
spin_unlock(&pool->lock);
cond_resched();
write_lock(&pool->migrate_lock);
spin_lock(&class->lock);
spin_lock(&pool->lock);
}

if (src_zspage) {
@@ -2096,8 +2089,7 @@ static unsigned long __zs_compact(struct zs_pool *pool,
migrate_write_unlock(src_zspage);
}

spin_unlock(&class->lock);
write_unlock(&pool->migrate_lock);
spin_unlock(&pool->lock);

return pages_freed;
}
@@ -2200,7 +2192,7 @@ struct zs_pool *zs_create_pool(const char *name)
return NULL;

init_deferred_free(pool);
rwlock_init(&pool->migrate_lock);
spin_lock_init(&pool->lock);

pool->name = kstrdup(name, GFP_KERNEL);
if (!pool->name)
@@ -2271,7 +2263,6 @@ struct zs_pool *zs_create_pool(const char *name)
class->index = i;
class->pages_per_zspage = pages_per_zspage;
class->objs_per_zspage = objs_per_zspage;
spin_lock_init(&class->lock);
pool->size_class[i] = class;
for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
fullness++)