path: root/mali_kbase/mali_kbase_mem_migrate.c
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2022-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Base kernel page migration implementation.
 *
 * Pages owned by the driver carry a struct kbase_page_metadata in their
 * private field and move through a small state machine (MEM_POOL,
 * ALLOCATED_MAPPED, PT_MAPPED, NOT_MOVABLE and several *_IN_PROGRESS
 * transitional states), guarded by a per-page migrate_lock. The Linux MM
 * drives migration through the kbase_page_isolate(), kbase_page_migrate()
 * and kbase_page_putback() callbacks registered below.
 */
#include <linux/migrate.h>

#include <mali_kbase.h>
#include <mali_kbase_mem_migrate.h>
#include <mmu/mali_kbase_mmu.h>

/* Global integer used to determine if a module parameter value has been
 * provided and if the page migration feature is enabled. Unless set
 * explicitly at insmod, the feature defaults to following the large (2MB)
 * page configuration; see kbase_mem_migrate_init().
 */
#if !IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT)
/* If page migration support is explicitly compiled out, there must be no way
 * to change this int: as a const global it is zero-initialised, and
 * module_param() is not called, so the value cannot be changed at insmod.
 */
const int kbase_page_migration_enabled;
#else
/* -1 by default; 0 when explicitly set off, 1 when explicitly set on */
int kbase_page_migration_enabled = -1;
module_param(kbase_page_migration_enabled, int, 0444);
MODULE_PARM_DESC(kbase_page_migration_enabled,
		 "Explicitly enable or disable page migration with 1 or 0 respectively.");
#endif /* !IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT) */

KBASE_EXPORT_TEST_API(kbase_page_migration_enabled);

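/**
 * kbase_is_page_migration_enabled - Check whether page migration is enabled.
 *
 * Return: true if page migration support is compiled in and the feature has
 *         been enabled, either explicitly at insmod or by default at device
 *         init; false otherwise (including while the value still holds the
 *         uninitialised -1 default).
 */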
bool kbase_is_page_migration_enabled(void)
{
	/* Handle uninitialised int case */
	if (kbase_page_migration_enabled < 0)
		return false;
	return IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT) && kbase_page_migration_enabled;
}
KBASE_EXPORT_SYMBOL(kbase_is_page_migration_enabled);

#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
static const struct movable_operations movable_ops;
#endif

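/**
 * kbase_alloc_page_metadata - Allocate migration metadata for a page and mark
 *                             the page as movable.
 *
 * @kbdev:    Pointer to the kbase device.
 * @p:        Page to attach metadata to.
 * @dma_addr: DMA address the page is mapped at.
 * @group_id: Physical memory group ID to record in the metadata.
 *
 * The metadata is stored in the page's private field and its status is
 * initialised to ALLOCATE_IN_PROGRESS.
 *
 * Return: true if metadata was allocated and attached, false otherwise.
 */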
bool kbase_alloc_page_metadata(struct kbase_device *kbdev, struct page *p, dma_addr_t dma_addr,
			       u8 group_id)
{
	struct kbase_page_metadata *page_md;

	/* A check for kbase_page_migration_enabled would help here too but it's already being
	 * checked in the only caller of this function.
	 */
	if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
		return false;

	page_md = kzalloc(sizeof(struct kbase_page_metadata), GFP_KERNEL);
	if (!page_md)
		return false;

	SetPagePrivate(p);
	set_page_private(p, (unsigned long)page_md);
	page_md->dma_addr = dma_addr;
	page_md->status = PAGE_STATUS_SET(page_md->status, (u8)ALLOCATE_IN_PROGRESS);
	page_md->vmap_count = 0;
	page_md->group_id = group_id;
	spin_lock_init(&page_md->migrate_lock);

	lock_page(p);
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
	__SetPageMovable(p, &movable_ops);
	page_md->status = PAGE_MOVABLE_SET(page_md->status);
#else
	/* In some corner cases, the driver may attempt to allocate memory pages
	 * even before the device file is open and the mapping for address space
	 * operations is created. In that case, it is impossible to assign address
	 * space operations to memory pages: simply pretend that they are movable,
	 * even if they are not.
	 *
	 * The page will go through all state transitions but it will never be
	 * actually considered movable by the kernel. This is due to the fact that
	 * the page cannot be marked as NOT_MOVABLE upon creation, otherwise the
	 * memory pool will always refuse to add it to the pool and schedule
	 * a worker thread to free it later.
	 *
	 * Page metadata may seem redundant in this case, but it is not,
	 * because memory pools expect metadata to be present when page migration
	 * is enabled and because the pages may always return to memory pools and
	 * gain the movable property later on in their life cycle.
	 */
	if (kbdev->mem_migrate.inode && kbdev->mem_migrate.inode->i_mapping) {
		__SetPageMovable(p, kbdev->mem_migrate.inode->i_mapping);
		page_md->status = PAGE_MOVABLE_SET(page_md->status);
	}
#endif
	unlock_page(p);

	return true;
}

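/**
 * kbase_free_page_metadata - Free a page's migration metadata and DMA unmap
 *                            the page.
 *
 * @kbdev:    Pointer to the kbase device.
 * @p:        Page whose metadata is to be freed.
 * @group_id: If not NULL, updated with the group ID held in the metadata.
 */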
static void kbase_free_page_metadata(struct kbase_device *kbdev, struct page *p, u8 *group_id)
{
	struct device *const dev = kbdev->dev;
	struct kbase_page_metadata *page_md;
	dma_addr_t dma_addr;

	if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
		return;
	page_md = kbase_page_private(p);
	if (!page_md)
		return;

	if (group_id)
		*group_id = page_md->group_id;
	dma_addr = kbase_dma_addr(p);
	dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

	kfree(page_md);
	set_page_private(p, 0);
	ClearPagePrivate(p);
}

#if IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT)
/**
 * kbase_free_pages_worker - Free pages that were queued for deferred freeing.
 *
 * @work: Work item embedded in &struct kbase_mem_migrate.
 *
 * Drain the list filled by kbase_free_page_later(): for each page, clear the
 * movable property, free its metadata and hand the page back to the memory
 * group manager. Only called when page migration support is not explicitly
 * compiled out.
 */
static void kbase_free_pages_worker(struct work_struct *work)
{
	struct kbase_mem_migrate *mem_migrate =
		container_of(work, struct kbase_mem_migrate, free_pages_work);
	struct kbase_device *kbdev = container_of(mem_migrate, struct kbase_device, mem_migrate);
	struct page *p, *tmp;
	struct kbase_page_metadata *page_md;
	LIST_HEAD(free_list);

	spin_lock(&mem_migrate->free_pages_lock);
	list_splice_init(&mem_migrate->free_pages_list, &free_list);
	spin_unlock(&mem_migrate->free_pages_lock);
	list_for_each_entry_safe(p, tmp, &free_list, lru) {
		u8 group_id = 0;
		list_del_init(&p->lru);

		lock_page(p);
		page_md = kbase_page_private(p);
		if (page_md && IS_PAGE_MOVABLE(page_md->status)) {
			__ClearPageMovable(p);
			page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
		}
		unlock_page(p);

		kbase_free_page_metadata(kbdev, p, &group_id);
		kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, group_id, p, 0);
	}
}
#endif

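/**
 * kbase_free_page_later - Queue a page to be freed by the deferred worker.
 *
 * @kbdev: Pointer to the kbase device.
 * @p:     Page to queue for freeing.
 *
 * The page is only added to the list: callers are expected to queue
 * free_pages_work themselves, as kbase_page_migrate() and
 * kbase_page_putback() do.
 */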
void kbase_free_page_later(struct kbase_device *kbdev, struct page *p)
{
	struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

	if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
		return;
	spin_lock(&mem_migrate->free_pages_lock);
	list_add(&p->lru, &mem_migrate->free_pages_list);
	spin_unlock(&mem_migrate->free_pages_lock);
}

/**
 * kbasep_migrate_page_pt_mapped - Migrate a memory page that is mapped
 *                                 in a PGD of kbase_mmu_table.
 *
 * @old_page:  Existing PGD page to remove
 * @new_page:  Destination for migrating the existing PGD page to
 *
 * Replace an existing PGD page with a new page by migrating its content. More specifically:
 * the new page shall replace the existing PGD page in the MMU page table. Before returning,
 * the new page shall be set as movable and not isolated, while the old page shall lose
 * the movable property. The metadata attached to the PGD page is transferred to the
 * new (replacement) page.
 *
 * This function returns early with an error if called when not compiled with
 * CONFIG_PAGE_MIGRATION_SUPPORT.
 *
 * Return: 0 on success, -EAGAIN if the migration should be retried later, or
 *         another negative error code if the migration failed and was aborted.
 */
static int kbasep_migrate_page_pt_mapped(struct page *old_page, struct page *new_page)
{
	struct kbase_page_metadata *page_md = kbase_page_private(old_page);
	struct kbase_context *kctx = page_md->data.pt_mapped.mmut->kctx;
	struct kbase_device *kbdev = kctx->kbdev;
	dma_addr_t old_dma_addr = page_md->dma_addr;
	dma_addr_t new_dma_addr;
	int ret;

	if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
		return -EINVAL;

	/* Create a new dma map for the new page */
	new_dma_addr = dma_map_page(kbdev->dev, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kbdev->dev, new_dma_addr))
		return -ENOMEM;

	/* Lock context to protect access to the page in physical allocation.
	 * This blocks the CPU page fault handler from remapping pages.
	 * Only MCU's mmut is device wide, i.e. no corresponding kctx.
	 */
	kbase_gpu_vm_lock_with_pmode_sync(kctx);

	ret = kbase_mmu_migrate_page(
		as_tagged(page_to_phys(old_page)), as_tagged(page_to_phys(new_page)), old_dma_addr,
		new_dma_addr, PGD_VPFN_LEVEL_GET_LEVEL(page_md->data.pt_mapped.pgd_vpfn_level));

	if (ret == 0) {
		dma_unmap_page(kbdev->dev, old_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
		__ClearPageMovable(old_page);
		ClearPagePrivate(old_page);
		put_page(old_page);

#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
		__SetPageMovable(new_page, &movable_ops);
		page_md->status = PAGE_MOVABLE_SET(page_md->status);
#else
		if (kbdev->mem_migrate.inode->i_mapping) {
			__SetPageMovable(new_page, kbdev->mem_migrate.inode->i_mapping);
			page_md->status = PAGE_MOVABLE_SET(page_md->status);
		}
#endif
		SetPagePrivate(new_page);
		get_page(new_page);
	} else
		dma_unmap_page(kbdev->dev, new_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* Page fault handler for CPU mapping unblocked. */
	kbase_gpu_vm_unlock_with_pmode_sync(kctx);

	return ret;
}

/**
 * kbasep_migrate_page_allocated_mapped - Migrate a memory page that is both
 *                                        allocated and mapped.
 *
 * @old_page:  Page to remove.
 * @new_page:  Page to add.
 *
 * Replace an old page with a new page by migrating its content and all its
 * CPU and GPU mappings. More specifically: the new page shall replace the
 * old page in the MMU page table, as well as in the page array of the physical
 * allocation, which is used to create CPU mappings. Before returning, the new
 * page shall be set as movable and not isolated, while the old page shall lose
 * the movable property.
 *
 * This function returns early with an error if called when not compiled with
 * CONFIG_PAGE_MIGRATION_SUPPORT.
 *
 * Return: 0 on success, or a negative error code otherwise.
 */
static int kbasep_migrate_page_allocated_mapped(struct page *old_page, struct page *new_page)
{
	struct kbase_page_metadata *page_md = kbase_page_private(old_page);
	struct kbase_context *kctx = page_md->data.mapped.mmut->kctx;
	dma_addr_t old_dma_addr, new_dma_addr;
	int ret;

	if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
		return -EINVAL;
	old_dma_addr = page_md->dma_addr;
	new_dma_addr = dma_map_page(kctx->kbdev->dev, new_page, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(kctx->kbdev->dev, new_dma_addr))
		return -ENOMEM;

	/* Lock context to protect access to array of pages in physical allocation.
	 * This blocks the CPU page fault handler from remapping pages.
	 */
	kbase_gpu_vm_lock_with_pmode_sync(kctx);

	/* Unmap the old physical range. */
	unmap_mapping_range(kctx->kfile->filp->f_inode->i_mapping,
			    page_md->data.mapped.vpfn << PAGE_SHIFT,
			    PAGE_SIZE, 1);

	ret = kbase_mmu_migrate_page(as_tagged(page_to_phys(old_page)),
				     as_tagged(page_to_phys(new_page)), old_dma_addr, new_dma_addr,
				     MIDGARD_MMU_BOTTOMLEVEL);

	if (ret == 0) {
		dma_unmap_page(kctx->kbdev->dev, old_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

		SetPagePrivate(new_page);
		get_page(new_page);

		/* Clear PG_movable from the old page and release reference. */
		ClearPagePrivate(old_page);
		__ClearPageMovable(old_page);
		put_page(old_page);

		/* Set PG_movable to the new page. */
#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
		__SetPageMovable(new_page, &movable_ops);
		page_md->status = PAGE_MOVABLE_SET(page_md->status);
#else
		if (kctx->kbdev->mem_migrate.inode->i_mapping) {
			__SetPageMovable(new_page, kctx->kbdev->mem_migrate.inode->i_mapping);
			page_md->status = PAGE_MOVABLE_SET(page_md->status);
		}
#endif
	} else
		dma_unmap_page(kctx->kbdev->dev, new_dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

	/* Page fault handler for CPU mapping unblocked. */
	kbase_gpu_vm_unlock_with_pmode_sync(kctx);

	return ret;
}

/**
 * kbase_page_isolate - Isolate a page for migration.
 *
 * @p:    Pointer to the struct page of the page to isolate.
 * @mode: LRU isolation mode.
 *
 * Callback function for Linux to isolate a page and prepare it for migration.
 * This callback is not registered if compiled without CONFIG_PAGE_MIGRATION_SUPPORT.
 *
 * Return: true on success, false otherwise.
 */
static bool kbase_page_isolate(struct page *p, isolate_mode_t mode)
{
	bool status_mem_pool = false;
	struct kbase_mem_pool *mem_pool = NULL;
	struct kbase_page_metadata *page_md = kbase_page_private(p);

	if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
		return false;
	CSTD_UNUSED(mode);

	if (!page_md || !IS_PAGE_MOVABLE(page_md->status))
		return false;

	if (!spin_trylock(&page_md->migrate_lock))
		return false;

	if (WARN_ON(IS_PAGE_ISOLATED(page_md->status))) {
		spin_unlock(&page_md->migrate_lock);
		return false;
	}

	switch (PAGE_STATUS_GET(page_md->status)) {
	case MEM_POOL:
		/* Prepare to remove page from memory pool later only if pool is not
		 * in the process of termination.
		 */
		mem_pool = page_md->data.mem_pool.pool;
		status_mem_pool = true;
		preempt_disable();
		atomic_inc(&mem_pool->isolation_in_progress_cnt);
		break;
	case ALLOCATED_MAPPED:
		/* Mark the page into isolated state, but only if it has no
		 * kernel CPU mappings
		 */
		if (page_md->vmap_count == 0)
			page_md->status = PAGE_ISOLATE_SET(page_md->status, 1);
		break;
	case PT_MAPPED:
		/* Mark the page into isolated state. */
		page_md->status = PAGE_ISOLATE_SET(page_md->status, 1);
		break;
	case SPILL_IN_PROGRESS:
	case ALLOCATE_IN_PROGRESS:
	case FREE_IN_PROGRESS:
		break;
	case NOT_MOVABLE:
		/* Opportunistically clear the movable property for these pages */
		__ClearPageMovable(p);
		page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
		break;
	default:
		/* State should always fall in one of the previous cases!
		 * Also notice that FREE_ISOLATED_IN_PROGRESS or
		 * FREE_PT_ISOLATED_IN_PROGRESS is impossible because
		 * that state only applies to pages that are already isolated.
		 */
		page_md->status = PAGE_ISOLATE_SET(page_md->status, 0);
		break;
	}

	spin_unlock(&page_md->migrate_lock);

	/* If the page is still in the memory pool: try to remove it. This will fail
	 * if the pool lock is already held, which could mean the page no longer
	 * exists in the pool.
	 */
	if (status_mem_pool) {
		if (!spin_trylock(&mem_pool->pool_lock)) {
			atomic_dec(&mem_pool->isolation_in_progress_cnt);
			preempt_enable();
			return false;
		}

		spin_lock(&page_md->migrate_lock);
		/* Check status again to ensure page has not been removed from memory pool. */
		if (PAGE_STATUS_GET(page_md->status) == MEM_POOL) {
			page_md->status = PAGE_ISOLATE_SET(page_md->status, 1);
			list_del_init(&p->lru);
			mem_pool->cur_size--;
		}
		spin_unlock(&page_md->migrate_lock);
		spin_unlock(&mem_pool->pool_lock);
		atomic_dec(&mem_pool->isolation_in_progress_cnt);
		preempt_enable();
	}

	return IS_PAGE_ISOLATED(page_md->status);
}

/**
 * kbase_page_migrate - Migrate content of old page to new page provided.
 *
 * @mapping:  Pointer to the address_space struct associated with the pages.
 * @new_page: Pointer to the struct page of the new page.
 * @old_page: Pointer to the struct page of the old page.
 * @mode:     Mode to determine if migration will be synchronised.
 *
 * Callback function for Linux to migrate the content of the old page to the
 * new page provided.
 * This callback is not registered if compiled without CONFIG_PAGE_MIGRATION_SUPPORT.
 *
 * Return: 0 on success, error code otherwise.
 */
#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
static int kbase_page_migrate(struct address_space *mapping, struct page *new_page,
			      struct page *old_page, enum migrate_mode mode)
#else
static int kbase_page_migrate(struct page *new_page, struct page *old_page, enum migrate_mode mode)
#endif
{
	int err = 0;
	bool status_mem_pool = false;
	bool status_free_pt_isolated_in_progress = false;
	bool status_free_isolated_in_progress = false;
	bool status_pt_mapped = false;
	bool status_mapped = false;
	bool status_not_movable = false;
	struct kbase_page_metadata *page_md = kbase_page_private(old_page);
	struct kbase_device *kbdev = NULL;

#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
	CSTD_UNUSED(mapping);
#endif
	CSTD_UNUSED(mode);

	if (!kbase_is_page_migration_enabled() || !page_md || !IS_PAGE_MOVABLE(page_md->status))
		return -EINVAL;

	if (!spin_trylock(&page_md->migrate_lock))
		return -EAGAIN;

	if (WARN_ON(!IS_PAGE_ISOLATED(page_md->status))) {
		spin_unlock(&page_md->migrate_lock);
		return -EINVAL;
	}

	switch (PAGE_STATUS_GET(page_md->status)) {
	case MEM_POOL:
		status_mem_pool = true;
		kbdev = page_md->data.mem_pool.kbdev;
		break;
	case ALLOCATED_MAPPED:
		status_mapped = true;
		break;
	case PT_MAPPED:
		status_pt_mapped = true;
		break;
	case FREE_ISOLATED_IN_PROGRESS:
		status_free_isolated_in_progress = true;
		kbdev = page_md->data.free_isolated.kbdev;
		break;
	case FREE_PT_ISOLATED_IN_PROGRESS:
		status_free_pt_isolated_in_progress = true;
		kbdev = page_md->data.free_pt_isolated.kbdev;
		break;
	case NOT_MOVABLE:
		status_not_movable = true;
		break;
	default:
		/* State should always fall in one of the previous cases! */
		err = -EAGAIN;
		break;
	}

	spin_unlock(&page_md->migrate_lock);

	if (status_mem_pool || status_free_isolated_in_progress ||
	    status_free_pt_isolated_in_progress) {
		struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

		kbase_free_page_metadata(kbdev, old_page, NULL);
		__ClearPageMovable(old_page);
		put_page(old_page);

		/* Just free new page to avoid lock contention. */
		INIT_LIST_HEAD(&new_page->lru);
		get_page(new_page);
		set_page_private(new_page, 0);
		kbase_free_page_later(kbdev, new_page);
		queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
	} else if (status_not_movable) {
		err = -EINVAL;
	} else if (status_mapped) {
		err = kbasep_migrate_page_allocated_mapped(old_page, new_page);
	} else if (status_pt_mapped) {
		err = kbasep_migrate_page_pt_mapped(old_page, new_page);
	}

	/* While we want to preserve the movability of pages for which we return
	 * EAGAIN, according to the kernel docs, movable pages for which a critical
	 * error is returned have putback_page() called on them, which may not be
	 * what we expect.
	 */
	if (err < 0 && err != -EAGAIN) {
		__ClearPageMovable(old_page);
		page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
	}

	return err;
}

/**
 * kbase_page_putback - Return isolated page back to kbase.
 *
 * @p: Pointer to the struct page of the page to put back.
 *
 * Callback function for Linux to return isolated page back to kbase. This
 * will only be called for a page that has been isolated but failed to
 * migrate. This function will put back the given page to the state it was
 * in before it was isolated.
 * This callback is not registered if compiled without CONFIG_PAGE_MIGRATION_SUPPORT.
 */
static void kbase_page_putback(struct page *p)
{
	bool status_mem_pool = false;
	bool status_free_isolated_in_progress = false;
	bool status_free_pt_isolated_in_progress = false;
	struct kbase_page_metadata *page_md = kbase_page_private(p);
	struct kbase_device *kbdev = NULL;

	if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
		return;
	/* If we don't have page metadata, the page may not belong to the
	 * driver or may already have been freed, and there's nothing we can do.
	 */
	if (!page_md)
		return;

	spin_lock(&page_md->migrate_lock);

	if (WARN_ON(!IS_PAGE_ISOLATED(page_md->status))) {
		spin_unlock(&page_md->migrate_lock);
		return;
	}

	switch (PAGE_STATUS_GET(page_md->status)) {
	case MEM_POOL:
		status_mem_pool = true;
		kbdev = page_md->data.mem_pool.kbdev;
		break;
	case ALLOCATED_MAPPED:
		page_md->status = PAGE_ISOLATE_SET(page_md->status, 0);
		break;
	case PT_MAPPED:
	case NOT_MOVABLE:
		/* Pages should no longer be isolated if they are in a stable state
		 * and used by the driver.
		 */
		page_md->status = PAGE_ISOLATE_SET(page_md->status, 0);
		break;
	case FREE_ISOLATED_IN_PROGRESS:
		status_free_isolated_in_progress = true;
		kbdev = page_md->data.free_isolated.kbdev;
		break;
	case FREE_PT_ISOLATED_IN_PROGRESS:
		status_free_pt_isolated_in_progress = true;
		kbdev = page_md->data.free_pt_isolated.kbdev;
		break;
	default:
		/* State should always fall in one of the previous cases! */
		break;
	}

	spin_unlock(&page_md->migrate_lock);

	/* If the page was in a memory pool, or a free was already in progress for
	 * it while isolated, then just free it here to avoid lock contention.
	 */
	if (status_mem_pool || status_free_isolated_in_progress ||
	    status_free_pt_isolated_in_progress) {
		__ClearPageMovable(p);
		page_md->status = PAGE_MOVABLE_CLEAR(page_md->status);
		if (!WARN_ON_ONCE(!kbdev)) {
			struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

			kbase_free_page_later(kbdev, p);
			queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
		}
	}
}

#if (KERNEL_VERSION(6, 0, 0) <= LINUX_VERSION_CODE)
static const struct movable_operations movable_ops = {
	.isolate_page = kbase_page_isolate,
	.migrate_page = kbase_page_migrate,
	.putback_page = kbase_page_putback,
};
#else
static const struct address_space_operations kbase_address_space_ops = {
	.isolate_page = kbase_page_isolate,
	.migratepage = kbase_page_migrate,
	.putback_page = kbase_page_putback,
};
#endif

#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
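/**
 * kbase_mem_migrate_set_address_space_ops - Bind the migration address space
 *                                           operations to the device file.
 *
 * @kbdev: Pointer to the kbase device.
 * @filp:  Device file whose inode mapping receives the operations.
 *
 * Only used on kernels older than 6.0, where movable pages must be tied to
 * an address space. The inode reference taken on first call is balanced by
 * iput() in kbase_mem_migrate_term().
 */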
void kbase_mem_migrate_set_address_space_ops(struct kbase_device *kbdev, struct file *const filp)
{
	if (!kbase_is_page_migration_enabled())
		return;

	mutex_lock(&kbdev->fw_load_lock);

	if (filp) {
		filp->f_inode->i_mapping->a_ops = &kbase_address_space_ops;

		if (!kbdev->mem_migrate.inode) {
			kbdev->mem_migrate.inode = filp->f_inode;
			/* This reference count increment is balanced by iput()
			 * upon termination.
			 */
			atomic_inc(&filp->f_inode->i_count);
		} else {
			WARN_ON(kbdev->mem_migrate.inode != filp->f_inode);
		}
	}

	mutex_unlock(&kbdev->fw_load_lock);
}
#endif

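/**
 * kbase_mem_migrate_init - Initialise page migration state for a device.
 *
 * @kbdev: Pointer to the kbase device.
 *
 * Resolves the default value of the module parameter and sets up the
 * deferred page-freeing list, lock, work item and workqueue.
 */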
void kbase_mem_migrate_init(struct kbase_device *kbdev)
{
#if !IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT)
	/* Page migration explicitly disabled at compile time - do nothing */
	return;
#else
	struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

	/* Page migration support is compiled in, either explicitly or by
	 * default. If the module parameter still holds its negative default
	 * value, it was not set at insmod, and the feature follows the choice
	 * of large (2MB) pages; otherwise the insmod value wins.
	 */
	if (kbase_page_migration_enabled < 0)
		kbase_page_migration_enabled = kbdev->pagesize_2mb ? 1 : 0;
	else
		dev_info(kbdev->dev, "Page migration support explicitly %s at insmod.",
			 kbase_page_migration_enabled ? "enabled" : "disabled");

	spin_lock_init(&mem_migrate->free_pages_lock);
	INIT_LIST_HEAD(&mem_migrate->free_pages_list);

#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
	mem_migrate->inode = NULL;
#endif
	mem_migrate->free_pages_workq =
		alloc_workqueue("free_pages_workq", WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	INIT_WORK(&mem_migrate->free_pages_work, kbase_free_pages_worker);
#endif
}

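/**
 * kbase_mem_migrate_term - Tear down page migration state for a device.
 *
 * @kbdev: Pointer to the kbase device.
 *
 * Counterpart of kbase_mem_migrate_init(): destroys the deferred page-freeing
 * workqueue and, on kernels older than 6.0, drops the reference held on the
 * device file inode.
 */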
void kbase_mem_migrate_term(struct kbase_device *kbdev)
{
	struct kbase_mem_migrate *mem_migrate = &kbdev->mem_migrate;

#if !IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT)
	/* Page migration explicitly disabled at compile time - do nothing */
	return;
#endif
	if (mem_migrate->free_pages_workq)
		destroy_workqueue(mem_migrate->free_pages_workq);
#if (KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE)
	iput(mem_migrate->inode);
#endif
}