path: root/mali_kbase/mali_kbase_mem.h

/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/**
 * DOC: Base kernel memory APIs
 */

#ifndef _KBASE_MEM_H_
#define _KBASE_MEM_H_

#ifndef _KBASE_H_
#error "Don't include this file directly, use mali_kbase.h instead"
#endif

#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <uapi/gpu/arm/midgard/mali_base_kernel.h>
#include <mali_kbase_hw.h>
#include "mali_kbase_pm.h"
#include "mali_kbase_defs.h"
/* Required for kbase_mem_evictable_unmake */
#include "mali_kbase_mem_linux.h"
#include "mali_kbase_reg_track.h"
#include "mali_kbase_mem_migrate.h"

#include <linux/version_compat_defs.h>
#include <linux/sched/mm.h>
#include <linux/kref.h>

static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages);

/* Part of the workaround for uTLB invalid pages is to ensure we grow/shrink tmem by 4 pages at a time */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316 (2) /* round to 4 pages */

/* Part of the workaround for PRLAM-9630 requires us to grow/shrink memory by
 * 8 pages. The MMU reads in 8 page table entries from memory at a time; if
 * more than one page fault occurs within the same 8 pages and the page tables
 * are updated accordingly, the MMU does not re-read the page table entries
 * from memory for the subsequent page table updates, and it generates
 * duplicate page faults because the page table information it is using is no
 * longer valid.
 */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630 (3) /* round to 8 pages */

#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2 (0) /* round to 1 page */

/* This must always be a power of 2 */
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES (1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_8316 \
	(1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_8316)
#define KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_HW_ISSUE_9630 \
	(1u << KBASEP_TMEM_GROWABLE_BLOCKSIZE_PAGES_LOG2_HW_ISSUE_9630)

/* Free region */
#define KBASE_REG_FREE (1ul << 0)
/* CPU write access */
#define KBASE_REG_CPU_WR (1ul << 1)
/* GPU write access */
#define KBASE_REG_GPU_WR (1ul << 2)
/* No eXecute flag */
#define KBASE_REG_GPU_NX (1ul << 3)
/* Is CPU cached? */
#define KBASE_REG_CPU_CACHED (1ul << 4)
/* Is GPU cached?
 * Some components within the GPU might only be able to access memory that is
 * GPU cacheable. Refer to the specific GPU implementation for more details.
 */
#define KBASE_REG_GPU_CACHED (1ul << 5)

#define KBASE_REG_GROWABLE (1ul << 6)
/* Can grow on pf? */
#define KBASE_REG_PF_GROW (1ul << 7)

/* Allocation doesn't straddle the 4GB boundary in GPU virtual space */
#define KBASE_REG_GPU_VA_SAME_4GB_PAGE (1ul << 8)

/* inner shareable coherency */
#define KBASE_REG_SHARE_IN (1ul << 9)
/* inner & outer shareable coherency */
#define KBASE_REG_SHARE_BOTH (1ul << 10)

/* Bits 11-13 (inclusive) are reserved for indicating the zone. */

/* GPU read access */
#define KBASE_REG_GPU_RD (1ul << 14)
/* CPU read access */
#define KBASE_REG_CPU_RD (1ul << 15)

/* Index of chosen MEMATTR for this region (0..7) */
#define KBASE_REG_MEMATTR_MASK (7ul << 16)
#define KBASE_REG_MEMATTR_INDEX(x) (((x)&7) << 16)
#define KBASE_REG_MEMATTR_VALUE(x) (((x)&KBASE_REG_MEMATTR_MASK) >> 16)
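
/* Illustrative sketch: selecting a MEMATTR index (e.g.
 * KBASE_MEMATTR_INDEX_NON_CACHEABLE, defined further below) in a region's
 * flags word and reading it back. The flags variable is assumed to be a
 * region's flags under construction.
 *
 *   unsigned long flags = 0;
 *
 *   flags &= ~KBASE_REG_MEMATTR_MASK;
 *   flags |= KBASE_REG_MEMATTR_INDEX(KBASE_MEMATTR_INDEX_NON_CACHEABLE);
 *
 *   // KBASE_REG_MEMATTR_VALUE(flags) now evaluates to
 *   // KBASE_MEMATTR_INDEX_NON_CACHEABLE
 */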

/* AS<n>_MEMATTR values from MMU_MEMATTR_STAGE1: */
/* Use GPU implementation-defined caching policy. */
#define KBASE_MEMATTR_IMPL_DEF_CACHE_POLICY                                         \
	((unsigned long long)(AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_SET(                  \
				      0ull, AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_IMPL) | \
			      AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SET(                \
				      0ull, AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_WRITE_BACK)))
/* The attribute set to force all resources to be cached. */
#define KBASE_MEMATTR_FORCE_TO_CACHE_ALL                                             \
	((unsigned long long)(AS_MEMATTR_ATTRIBUTE0_ALLOC_W_MASK |                   \
			      AS_MEMATTR_ATTRIBUTE0_ALLOC_R_MASK |                   \
			      AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_SET(                   \
				      0ull, AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_ALLOC) | \
			      AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SET(                 \
				      0ull, AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_WRITE_BACK)))
/* Inner write-alloc cache setup, no outer caching */
#define KBASE_MEMATTR_WRITE_ALLOC                                                    \
	((unsigned long long)(AS_MEMATTR_ATTRIBUTE0_ALLOC_W_MASK |                   \
			      AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_SET(                   \
				      0ull, AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_ALLOC) | \
			      AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SET(                 \
				      0ull, AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_WRITE_BACK)))
/* Set to implementation defined, outer caching */
#define KBASE_MEMATTR_AARCH64_OUTER_IMPL_DEF                                        \
	((unsigned long long)(AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_SET(                  \
				      0ull, AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_IMPL) | \
			      AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SET(                \
				      0ull, AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_WRITE_BACK)))
/* Set to write back memory, outer caching */
#define KBASE_MEMATTR_AARCH64_OUTER_WA                                               \
	((unsigned long long)(AS_MEMATTR_ATTRIBUTE0_ALLOC_W_MASK |                   \
			      AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_SET(                   \
				      0ull, AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_ALLOC) | \
			      AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SET(                 \
				      0ull, AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_WRITE_BACK)))
/* Set to inner non-cacheable, outer non-cacheable.
 * The setting defined by the alloc bits is ignored, but it is set to a valid encoding:
 * - no alloc on read
 * - no alloc on write
 */
#define KBASE_MEMATTR_AARCH64_NON_CACHEABLE                                          \
	((unsigned long long)(AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_SET(                   \
				      0ull, AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_ALLOC) | \
			      AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SET(                 \
				      0ull, AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_NON_CACHEABLE)))

/* Symbols for default MEMATTR to use
 * Default is - HW implementation defined caching
 */
#define KBASE_MEMATTR_INDEX_DEFAULT 0
#define KBASE_MEMATTR_INDEX_DEFAULT_ACE 3

/* HW implementation defined caching */
#define KBASE_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY 0
/* Force cache on */
#define KBASE_MEMATTR_INDEX_FORCE_TO_CACHE_ALL 1
/* Write-alloc */
#define KBASE_MEMATTR_INDEX_WRITE_ALLOC 2
/* Outer coherent, inner implementation defined policy */
#define KBASE_MEMATTR_INDEX_OUTER_IMPL_DEF 3
/* Outer coherent, write alloc inner */
#define KBASE_MEMATTR_INDEX_OUTER_WA 4
/* Normal memory, inner non-cacheable, outer non-cacheable (ARMv8 mode only) */
#define KBASE_MEMATTR_INDEX_NON_CACHEABLE 5

#if MALI_USE_CSF
/* Set to shared memory: inner cacheable on ACE if inner- or outer-shared,
 * otherwise inner non-cacheable.
 * Outer cacheable if inner- or outer-shared, otherwise outer non-cacheable.
 */
#define KBASE_MEMATTR_AARCH64_SHARED                                                \
	((unsigned long long)(AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_SET(                  \
				      0ull, AS_MEMATTR_ATTRIBUTE0_ALLOC_SEL_IMPL) | \
			      AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SET(                \
				      0ull, AS_MEMATTR_ATTRIBUTE0_MEMORY_TYPE_SHARED)))

/* Normal memory, shared between MCU and Host */
#define KBASE_MEMATTR_INDEX_SHARED 6
#endif

#define KBASE_REG_PROTECTED (1ul << 19)

/* Region belongs to a shrinker.
 *
 * This can either mean that it is part of the JIT/Ephemeral or tiler heap
 * shrinker paths. Should be removed only after making sure that there are
 * no references remaining to it in these paths, as it may cause the physical
 * backing of the region to disappear during use.
 */
#define KBASE_REG_DONT_NEED (1ul << 20)

/* Imported buffer is padded? */
#define KBASE_REG_IMPORT_PAD (1ul << 21)

#if MALI_USE_CSF
/* CSF event memory */
#define KBASE_REG_CSF_EVENT (1ul << 22)
/* Bit 23 is reserved.
 *
 * Do not remove, use the next unreserved bit for new flags
 */
#define KBASE_REG_RESERVED_BIT_23 (1ul << 23)
#else
/* Bit 22 is reserved.
 *
 * Do not remove, use the next unreserved bit for new flags
 */
#define KBASE_REG_RESERVED_BIT_22 (1ul << 22)
/* The top of the initial commit is aligned to extension pages.
 * The extension must be a power of 2.
 */
#define KBASE_REG_TILER_ALIGN_TOP (1ul << 23)
#endif /* MALI_USE_CSF */

/* Bit 24 is currently unused and is available for use for a new flag */

/* Memory has permanent kernel side mapping */
#define KBASE_REG_PERMANENT_KERNEL_MAPPING (1ul << 25)

/* GPU VA region has been freed by the userspace, but still remains allocated
 * due to the reference held by CPU mappings created on the GPU VA region.
 *
 * A region with this flag set has had kbase_gpu_munmap() called on it, but can
 * still be looked-up in the region tracker as a non-free region. Hence must
 * not create or update any more GPU mappings on such regions because they will
 * not be unmapped when the region is finally destroyed.
 *
 * Since such regions are still present in the region tracker, new allocations
 * attempted with BASE_MEM_SAME_VA might fail if their address intersects with
 * a region with this flag set.
 *
 * In addition, this flag indicates that the gpu_alloc member might no longer be
 * valid, e.g. in infinite cache simulation.
 */
#define KBASE_REG_VA_FREED (1ul << 26)

/* If set, the heap info address points to a u32 holding the used size in bytes;
 * otherwise it points to a u64 holding the lowest address of unused memory.
 */
#define KBASE_REG_HEAP_INFO_IS_SIZE (1ul << 27)

/* Allocation is actively used for JIT memory */
#define KBASE_REG_ACTIVE_JIT_ALLOC (1ul << 28)

#if MALI_USE_CSF
/* This flag only applies to allocations in the EXEC_FIXED_VA and FIXED_VA
 * memory zones, and it determines whether they were created with a fixed
 * GPU VA address requested by the user.
 */
#define KBASE_REG_FIXED_ADDRESS (1ul << 29)
#else
#define KBASE_REG_RESERVED_BIT_29 (1ul << 29)
#endif
/*
 * A CPU mapping
 */
struct kbase_cpu_mapping {
	struct list_head mappings_list;
	struct kbase_mem_phy_alloc *alloc;
	struct kbase_context *kctx;
	struct kbase_va_region *region;
	int count;
	int free_on_close;
};

enum kbase_memory_type {
	KBASE_MEM_TYPE_NATIVE,
	KBASE_MEM_TYPE_IMPORTED_UMM,
	KBASE_MEM_TYPE_IMPORTED_USER_BUF,
	KBASE_MEM_TYPE_ALIAS,
	KBASE_MEM_TYPE_RAW
};

/* internal structure, mirroring base_mem_aliasing_info,
 * but with alloc instead of a gpu va (handle)
 */
struct kbase_aliased {
	struct kbase_mem_phy_alloc *alloc; /* NULL for special, non-NULL for native */
	u64 offset; /* in pages */
	u64 length; /* in pages */
};

/* Physical pages tracking object properties */
#define KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED (1u << 0)
#define KBASE_MEM_PHY_ALLOC_LARGE (1u << 1)

/* enum kbase_user_buf_state - State of a USER_BUF handle.
 * @KBASE_USER_BUF_STATE_EMPTY: Empty handle with no resources.
 * @KBASE_USER_BUF_STATE_PINNED: Physical pages have been pinned.
 * @KBASE_USER_BUF_STATE_DMA_MAPPED: DMA addresses for cache maintenance
 *                                   operations have been mapped.
 * @KBASE_USER_BUF_STATE_GPU_MAPPED: Mapped on GPU address space.
 */
enum kbase_user_buf_state {
	KBASE_USER_BUF_STATE_EMPTY,
	KBASE_USER_BUF_STATE_PINNED,
	KBASE_USER_BUF_STATE_DMA_MAPPED,
	KBASE_USER_BUF_STATE_GPU_MAPPED,
	KBASE_USER_BUF_STATE_COUNT = 4
};

/* struct kbase_mem_phy_alloc - Physical pages tracking object.
 *
 * Set up to track N pages.
 * N not stored here, the creator holds that info.
 * This object only tracks how many elements are actually valid (present).
 * Changing of nents or *pages should only happen if the kbase_mem_phy_alloc
 * is not shared with another region or client. CPU mappings are OK to
 * exist when changing, as long as the tracked mappings objects are
 * updated as part of the change.
 *
 * @kref: number of users of this alloc
 * @gpu_mappings: count number of times mapped on the GPU. Indicates the number
 *                of references there are to the physical pages from different
 *                GPU VA regions.
 * @kernel_mappings: count number of times mapped on the CPU, specifically in
 *                   the kernel. Indicates the number of references there are
 *                   to the physical pages to prevent flag changes or shrink
 *                   while maps are still held.
 * @nents: 0..N
 * @pages: N elements, only 0..(nents - 1) are valid
 * @mappings: List of CPU mappings of this physical memory allocation.
 * @evict_node: Node used to store this allocation on the eviction list
 * @evicted: Physical backing size when the pages were evicted
 * @reg: Back reference to the region structure which created this
 *       allocation, or NULL if it has been freed.
 * @type: type of buffer
 * @permanent_map: Kernel side mapping of the alloc, shall never be
 *                 referred directly. kbase_phy_alloc_mapping_get() &
 *                 kbase_phy_alloc_mapping_put() pair should be used
 *                 around access to the kernel-side CPU mapping so that
 *                 mapping doesn't disappear whilst it is being accessed.
 * @properties: Bitmask of properties, e.g. KBASE_MEM_PHY_ALLOC_LARGE.
 * @group_id: A memory group ID to be passed to a platform-specific
 *            memory group manager, if present.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 * @imported: member in union valid based on @type
 */
struct kbase_mem_phy_alloc {
	struct kref kref;
	atomic_t gpu_mappings;
	atomic_t kernel_mappings;
	size_t nents;
	struct tagged_addr *pages;
	struct list_head mappings;
	struct list_head evict_node;
	size_t evicted;
	struct kbase_va_region *reg;
	enum kbase_memory_type type;
	struct kbase_vmap_struct *permanent_map;
	u8 properties;
	u8 group_id;

	union {
		struct {
			struct kbase_context *kctx;
			struct dma_buf *dma_buf;
			struct dma_buf_attachment *dma_attachment;
			unsigned int current_mapping_usage_count;
			struct sg_table *sgt;
			bool need_sync;
		} umm;
		struct {
			u64 stride;
			size_t nents;
			struct kbase_aliased *aliased;
		} alias;
		struct {
			struct kbase_context *kctx;
			/* Number of pages in this structure, including *pages.
			 * Used for kernel memory tracking.
			 */
			size_t nr_struct_pages;
		} native;
		struct kbase_alloc_import_user_buf {
			unsigned long address;
			unsigned long size;
			unsigned long nr_pages;
			struct page **pages;
			u32 current_mapping_usage_count;
			struct mm_struct *mm;
			dma_addr_t *dma_addrs;
			enum kbase_user_buf_state state;
		} user_buf;
	} imported;
};

/**
 * enum kbase_page_status - Status of a page used for page migration.
 *
 * @MEM_POOL: Stable state. Page is located in a memory pool and can safely
 *            be migrated.
 * @ALLOCATE_IN_PROGRESS: Transitory state. A page is set to this status as
 *                        soon as it leaves a memory pool.
 * @SPILL_IN_PROGRESS: Transitory state. Corner case where pages in a memory
 *                     pool of a dying context are being moved to the device
 *                     memory pool.
 * @NOT_MOVABLE: Stable state. Page has been allocated for an object that is
 *               not movable, but may return to be movable when the object
 *               is freed.
 * @ALLOCATED_MAPPED: Stable state. Page has been allocated, mapped to GPU
 *                    and has reference to kbase_mem_phy_alloc object.
 * @PT_MAPPED: Stable state. Similar to ALLOCATED_MAPPED, but page doesn't
 *             reference kbase_mem_phy_alloc object. Used as a page in MMU
 *             page table.
 * @FREE_IN_PROGRESS: Transitory state. A page is set to this status as soon as
 *                    the driver manages to acquire a lock on the page while
 *                    unmapping it. This status means that a memory release is
 *                    happening and it's still not complete.
 * @FREE_ISOLATED_IN_PROGRESS: Transitory state. This is a very particular corner case.
 *                             A page is isolated while it is in ALLOCATED_MAPPED state,
 *                             but then the driver tries to destroy the allocation.
 * @FREE_PT_ISOLATED_IN_PROGRESS: Transitory state. This is a very particular corner case.
 *                                A page is isolated while it is in PT_MAPPED state, but
 *                                then the driver tries to destroy the allocation.
 *
 * Pages can only be migrated in stable states.
 */
enum kbase_page_status {
	MEM_POOL = 0,
	ALLOCATE_IN_PROGRESS,
	SPILL_IN_PROGRESS,
	NOT_MOVABLE,
	ALLOCATED_MAPPED,
	PT_MAPPED,
	FREE_IN_PROGRESS,
	FREE_ISOLATED_IN_PROGRESS,
	FREE_PT_ISOLATED_IN_PROGRESS,
};

#define PGD_VPFN_LEVEL_MASK ((u64)0x3)
#define PGD_VPFN_LEVEL_GET_LEVEL(pgd_vpfn_level) (pgd_vpfn_level & PGD_VPFN_LEVEL_MASK)
#define PGD_VPFN_LEVEL_GET_VPFN(pgd_vpfn_level) (pgd_vpfn_level & ~PGD_VPFN_LEVEL_MASK)
#define PGD_VPFN_LEVEL_SET(pgd_vpfn, level) \
	((pgd_vpfn & ~PGD_VPFN_LEVEL_MASK) | (level & PGD_VPFN_LEVEL_MASK))
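
/* Illustrative sketch: the macros above pack an MMU level into the two lowest
 * bits of a PGD virtual page frame number and unpack it again. The vpfn value
 * is assumed to have its two lowest bits clear, so the VPFN and the level
 * occupy disjoint bits.
 *
 *   u64 packed = PGD_VPFN_LEVEL_SET(vpfn, level);
 *
 *   // PGD_VPFN_LEVEL_GET_LEVEL(packed) == (level & PGD_VPFN_LEVEL_MASK)
 *   // PGD_VPFN_LEVEL_GET_VPFN(packed)  == (vpfn & ~PGD_VPFN_LEVEL_MASK)
 */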

/**
 * struct kbase_page_metadata - Metadata for each page in kbase
 *
 * @kbdev:         Pointer to kbase device.
 * @dma_addr:      DMA address mapped to page.
 * @migrate_lock:  A spinlock to protect the private metadata.
 * @data:          Member in union valid based on @status.
 * @status:        Status to keep track if page can be migrated at any
 *                 given moment. MSB will indicate if page is isolated.
 *                 Protected by @migrate_lock.
 * @vmap_count:    Counter of kernel mappings.
 * @group_id:      Memory group ID obtained at the time of page allocation.
 *
 * Each small page will have a reference to this struct in the private field.
 * This will be used to keep track of information required for Linux page
 * migration functionality as well as address for DMA mapping.
 */
struct kbase_page_metadata {
	dma_addr_t dma_addr;
	spinlock_t migrate_lock;

	union {
		struct {
			struct kbase_mem_pool *pool;
			/* Pool could be terminated after page is isolated and therefore
			 * won't be able to get reference to kbase device.
			 */
			struct kbase_device *kbdev;
		} mem_pool;
		struct {
			struct kbase_va_region *reg;
			struct kbase_mmu_table *mmut;
			/* GPU virtual page frame number, in GPU_PAGE_SIZE units */
			u64 vpfn;
		} mapped;
		struct {
			struct kbase_mmu_table *mmut;
			/* GPU virtual page frame number info is in GPU_PAGE_SIZE units */
			u64 pgd_vpfn_level;
		} pt_mapped;
		struct {
			struct kbase_device *kbdev;
		} free_isolated;
		struct {
			struct kbase_device *kbdev;
		} free_pt_isolated;
	} data;

	u8 status;
	u8 vmap_count;
	u8 group_id;
};

/**
 * enum kbase_jit_report_flags - Flags for just-in-time memory allocation
 *                               pressure limit functions
 * @KBASE_JIT_REPORT_ON_ALLOC_OR_FREE: Notifying about an update happening due
 * to a just-in-time memory allocation or free
 *
 * Used to control flow within pressure limit related functions, or to provide
 * extra debugging information
 */
enum kbase_jit_report_flags { KBASE_JIT_REPORT_ON_ALLOC_OR_FREE = (1u << 0) };

/**
 * kbase_set_phy_alloc_page_status - Set the page migration status of the underlying
 *                                   physical allocation.
 * @alloc:  the physical allocation containing the pages whose metadata is going
 *          to be modified
 * @status: the status the pages should end up in
 *
 * Note that this function does not go through all of the checking to ensure that
 * proper states are set. Instead, it is only used when we change the allocation
 * to NOT_MOVABLE, or from NOT_MOVABLE to ALLOCATED_MAPPED.
 */
void kbase_set_phy_alloc_page_status(struct kbase_mem_phy_alloc *alloc,
				     enum kbase_page_status status);
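
/* Illustrative sketch, assuming the caller owns a region @reg whose pages must
 * stop being considered for migration:
 *
 *   kbase_set_phy_alloc_page_status(reg->gpu_alloc, NOT_MOVABLE);
 *
 * Per the note above, the only other intended use is the reverse transition
 * from NOT_MOVABLE back to ALLOCATED_MAPPED.
 */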

static inline void kbase_mem_phy_alloc_gpu_mapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		atomic_inc(&alloc->gpu_mappings);
}

static inline void kbase_mem_phy_alloc_gpu_unmapped(struct kbase_mem_phy_alloc *alloc)
{
	KBASE_DEBUG_ASSERT(alloc);
	/* we only track mappings of NATIVE buffers */
	if (alloc->type == KBASE_MEM_TYPE_NATIVE)
		if (atomic_dec_return(&alloc->gpu_mappings) < 0) {
			pr_err("Mismatched %s:\n", __func__);
			dump_stack();
		}
}

/**
 * kbase_mem_phy_alloc_kernel_mapped - Increment kernel_mappings counter for a
 *                                     memory region to prevent commit and flag
 *                                     changes
 *
 * @alloc:  Pointer to physical pages tracking object
 */
static inline void kbase_mem_phy_alloc_kernel_mapped(struct kbase_mem_phy_alloc *alloc)
{
	atomic_inc(&alloc->kernel_mappings);
}

/**
 * kbase_mem_phy_alloc_kernel_unmapped - Decrement kernel_mappings
 * counter for a memory region to allow commit and flag changes
 *
 * @alloc:  Pointer to physical pages tracking object
 */
static inline void kbase_mem_phy_alloc_kernel_unmapped(struct kbase_mem_phy_alloc *alloc)
{
	WARN_ON(atomic_dec_return(&alloc->kernel_mappings) < 0);
}
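
/* Illustrative sketch: a kernel-side user of the physical pages brackets its
 * access with the counters above, so the allocation cannot be shrunk or have
 * its flags changed while the kernel mapping is in use. The alloc pointer is
 * assumed to be held by the caller.
 *
 *   kbase_mem_phy_alloc_kernel_mapped(alloc);
 *   // ... access the pages through the kernel CPU mapping ...
 *   kbase_mem_phy_alloc_kernel_unmapped(alloc);
 */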

/**
 * kbase_mem_is_imported - Indicate whether a memory type is imported
 *
 * @type: the memory type
 *
 * Return: true if the memory type is imported, false otherwise
 */
static inline bool kbase_mem_is_imported(enum kbase_memory_type type)
{
	return (type == KBASE_MEM_TYPE_IMPORTED_UMM) || (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
}

void kbase_mem_kref_free(struct kref *kref);

/**
 * kbase_mem_init - Initialize kbase device for memory operation.
 * @kbdev: Pointer to the kbase device
 *
 * This function must be called only when a kbase device is initialized.
 *
 * Return: 0 on success
 */
int kbase_mem_init(struct kbase_device *kbdev);
void kbase_mem_halt(struct kbase_device *kbdev);
void kbase_mem_term(struct kbase_device *kbdev);

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_get(struct kbase_mem_phy_alloc *alloc)
{
	kref_get(&alloc->kref);
	return alloc;
}

static inline struct kbase_mem_phy_alloc *kbase_mem_phy_alloc_put(struct kbase_mem_phy_alloc *alloc)
{
	kref_put(&alloc->kref, kbase_mem_kref_free);
	return NULL;
}
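
/* Illustrative sketch: kbase_mem_phy_alloc_put() always returns NULL, so a
 * caller can drop its reference and clear its pointer in a single statement.
 *
 *   reg->cpu_alloc = kbase_mem_phy_alloc_put(reg->cpu_alloc);
 */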

/**
 * struct kbase_va_region - A GPU memory region, and attributes for CPU mappings
 *
 * @rblink: Node in a red-black tree of memory regions within the same zone of
 *          the GPU's virtual address space.
 * @link:   Links to neighboring items in a list of growable memory regions
 *          that triggered incremental rendering by growing too much.
 * @rbtree:          Backlink to the red-black tree of memory regions.
 * @start_pfn:       The Page Frame Number in GPU virtual address space.
 * @user_data:       The address of the GPU command queue when the VA region
 *                   represents a ring buffer.
 * @nr_pages:        The size of the region in pages.
 * @initial_commit:  Initial commit, for aligning the start address and
 *                   correctly growing KBASE_REG_TILER_ALIGN_TOP regions.
 * @threshold_pages: If non-zero and the amount of memory committed to a region
 *                   that can grow on page fault exceeds this number of pages
 *                   then the driver switches to incremental rendering.
 * @flags:           Flags
 * @extension:    Number of pages allocated on page fault.
 * @cpu_alloc: The physical memory we mmap to the CPU when mapping this region.
 * @gpu_alloc: The physical memory we mmap to the GPU when mapping this region.
 * @jit_node:     Links to neighboring regions in the just-in-time memory pool.
 * @jit_usage_id: The last just-in-time memory usage ID for this region.
 * @jit_bin_id:   The just-in-time memory bin this region came from.
 * @va_refcnt:    Number of users of this region. Protected by reg_lock.
 * @no_user_free_count:    Number of contexts that want to prevent the region
 *                         from being freed by userspace.
 * @heap_info_gpu_addr: Pointer to an object in GPU memory defining an end of
 *                      an allocated region
 *                      The object can be one of:
 *                      - u32 value defining the size of the region
 *                      - u64 pointer to the first unused byte in the region
 *                      The interpretation of the object depends on
 *                      BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE flag in
 *                      jit_info_flags - if it is set, the heap info object
 *                      should be interpreted as size.
 * @used_pages: The current estimate of the number of pages used, which in
 *              normal use is either:
 *              - the initial estimate == va_pages
 *              - the actual pages used, as found by a JIT usage report
 *              Note that since the value is calculated from GPU memory after a
 *              JIT usage report, at any point in time it is allowed to take a
 *              random value that is no greater than va_pages (e.g. it may be
 *              greater than gpu_alloc->nents)
 */
struct kbase_va_region {
	struct rb_node rblink;
	struct list_head link;
	struct rb_root *rbtree;
	u64 start_pfn;
	void *user_data;
	size_t nr_pages;
	size_t initial_commit;
	size_t threshold_pages;
	unsigned long flags;
	size_t extension;
	struct kbase_mem_phy_alloc *cpu_alloc;
	struct kbase_mem_phy_alloc *gpu_alloc;
	struct list_head jit_node;
	u16 jit_usage_id;
	u8 jit_bin_id;

#if MALI_JIT_PRESSURE_LIMIT_BASE
	/* Pointer to an object in GPU memory defining an end of an allocated
	 * region
	 *
	 * The object can be one of:
	 * - u32 value defining the size of the region
	 * - u64 pointer to the first unused byte in the region
	 *
	 * The interpretation of the object depends on
	 * BASE_JIT_ALLOC_HEAP_INFO_IS_SIZE flag in jit_info_flags - if it is
	 * set, the heap info object should be interpreted as size.
	 */
	u64 heap_info_gpu_addr;

	/* The current estimate of the number of pages used, which in normal
	 * use is either:
	 * - the initial estimate == va_pages
	 * - the actual pages used, as found by a JIT usage report
	 *
	 * Note that since the value is calculated from GPU memory after a JIT
	 * usage report, at any point in time it is allowed to take a random
	 * value that is no greater than va_pages (e.g. it may be greater than
	 * gpu_alloc->nents)
	 */
	size_t used_pages;
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */

	kbase_refcount_t va_refcnt;
	atomic64_t no_user_free_count;
};

/* Special marker for failed JIT allocations that still must be marked as
 * in-use
 */
#define KBASE_RESERVED_REG_JIT_ALLOC ((struct kbase_va_region *)-1)

static inline bool kbase_is_region_free(struct kbase_va_region *reg)
{
	return (!reg || reg->flags & KBASE_REG_FREE);
}

static inline bool kbase_is_region_invalid(struct kbase_va_region *reg)
{
	return (!reg || reg->flags & KBASE_REG_VA_FREED);
}

static inline bool kbase_is_region_invalid_or_free(struct kbase_va_region *reg)
{
	/* Possibly not all functions that find regions would be using this
	 * helper, so they need to be checked when maintaining this function.
	 */
	return (kbase_is_region_invalid(reg) || kbase_is_region_free(reg));
}

/**
 * kbase_is_region_shrinkable - Check if a region is "shrinkable".
 * A shrinkable region is one whose backing pages (reg->gpu_alloc->pages)
 * can be freed at any point, even though the kbase_va_region structure itself
 * may have been refcounted.
 * Regions that aren't on a shrinker, but could be shrunk at any point in the
 * future without warning, are still considered "shrinkable" (e.g. active JIT
 * allocs).
 *
 * @reg: Pointer to region
 *
 * Return: true if the region is "shrinkable", false if not.
 */
static inline bool kbase_is_region_shrinkable(struct kbase_va_region *reg)
{
	return (reg->flags & KBASE_REG_DONT_NEED) || (reg->flags & KBASE_REG_ACTIVE_JIT_ALLOC);
}

void kbase_remove_va_region(struct kbase_device *kbdev, struct kbase_va_region *reg);
static inline void kbase_region_refcnt_free(struct kbase_device *kbdev, struct kbase_va_region *reg)
{
	/* If the region was mapped then remove the VA region */
	if (reg->start_pfn)
		kbase_remove_va_region(kbdev, reg);

	/* To detect use-after-free in debug builds */
	KBASE_DEBUG_CODE(reg->flags |= KBASE_REG_FREE);
	kfree(reg);
}

static inline struct kbase_va_region *kbase_va_region_alloc_get(struct kbase_context *kctx,
								struct kbase_va_region *region)
{
	WARN_ON(!kbase_refcount_read(&region->va_refcnt));
	WARN_ON(kbase_refcount_read(&region->va_refcnt) == INT_MAX);

	dev_dbg(kctx->kbdev->dev, "va_refcnt %d before get %pK\n",
		kbase_refcount_read(&region->va_refcnt), (void *)region);
	kbase_refcount_inc(&region->va_refcnt);

	return region;
}

static inline struct kbase_va_region *kbase_va_region_alloc_put(struct kbase_context *kctx,
								struct kbase_va_region *region)
{
	WARN_ON(kbase_refcount_read(&region->va_refcnt) <= 0);
	WARN_ON(region->flags & KBASE_REG_FREE);

	if (kbase_refcount_dec_and_test(&region->va_refcnt))
		kbase_region_refcnt_free(kctx->kbdev, region);
	else
		dev_dbg(kctx->kbdev->dev, "va_refcnt %d after put %pK\n",
			kbase_refcount_read(&region->va_refcnt), (void *)region);

	return NULL;
}
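
/* Illustrative sketch, assuming @kctx and a looked-up @region are provided by
 * the caller: code that needs the region to stay valid takes a reference with
 * the get helper and releases it with the put helper when done.
 *
 *   struct kbase_va_region *reg = kbase_va_region_alloc_get(kctx, region);
 *
 *   // ... use reg while the reference is held ...
 *
 *   reg = kbase_va_region_alloc_put(kctx, reg); // always returns NULL
 */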

/**
 * kbase_va_region_is_no_user_free - Check if user free is forbidden for the region.
 * A region that must not be freed by userspace indicates that it is owned by some other
 * kbase subsystem, for example tiler heaps, JIT memory or CSF queues.
 * Such regions must not be shrunk (i.e. have their backing pages freed), except by the
 * current owner.
 * Hence, callers cannot rely on this check alone to determine if a region might be shrunk
 * by any part of kbase. Instead they should use kbase_is_region_shrinkable().
 *
 * @region: Pointer to region.
 *
 * Return: true if userspace cannot free the region, false if userspace can free the region.
 */
static inline bool kbase_va_region_is_no_user_free(struct kbase_va_region *region)
{
	return atomic64_read(&region->no_user_free_count) > 0;
}

/**
 * kbase_va_region_no_user_free_inc - Increment "no user free" count for a region.
 * Calling this function will prevent the region from being shrunk by parts of
 * kbase that don't own the region (as long as the count stays above zero). Refer
 * to kbase_va_region_is_no_user_free() for more information.
 *
 * @region: Pointer to region (not shrinkable).
 */
static inline void kbase_va_region_no_user_free_inc(struct kbase_va_region *region)
{
	WARN_ON(kbase_is_region_shrinkable(region));
	WARN_ON(atomic64_read(&region->no_user_free_count) == S64_MAX);

	/* non-atomic as kctx->reg_lock is held */
	atomic64_inc(&region->no_user_free_count);
}

/**
 * kbase_va_region_no_user_free_dec - Decrement "no user free" count for a region.
 *
 * @region: Pointer to region (not shrinkable).
 */
static inline void kbase_va_region_no_user_free_dec(struct kbase_va_region *region)
{
	WARN_ON(!kbase_va_region_is_no_user_free(region));

	atomic64_dec(&region->no_user_free_count);
}
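
/* Illustrative sketch, assuming a kbase subsystem (e.g. a tiler heap) takes
 * ownership of @reg while holding kctx->reg_lock:
 *
 *   kbase_va_region_no_user_free_inc(reg);
 *   // ... userspace can no longer free the region while the count is held ...
 *   kbase_va_region_no_user_free_dec(reg);
 */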

/* Common functions */
static inline struct tagged_addr *kbase_get_cpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->pages;
}

static inline struct tagged_addr *kbase_get_gpu_phy_pages(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->gpu_alloc->pages;
}

static inline size_t kbase_reg_current_backed_size(struct kbase_va_region *reg)
{
	KBASE_DEBUG_ASSERT(reg);
	/* if no alloc object the backed size naturally is 0 */
	if (!reg->cpu_alloc)
		return 0;

	KBASE_DEBUG_ASSERT(reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->cpu_alloc->nents == reg->gpu_alloc->nents);

	return reg->cpu_alloc->nents;
}

#define KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD \
	((size_t)(4 * 1024)) /* size above which vmalloc is used over kmalloc */

static inline struct kbase_mem_phy_alloc *kbase_alloc_create(struct kbase_context *kctx,
							     size_t nr_pages,
							     enum kbase_memory_type type,
							     int group_id)
{
	struct kbase_mem_phy_alloc *alloc;
	size_t alloc_size = sizeof(*alloc) + sizeof(*alloc->pages) * nr_pages;
	size_t per_page_size = sizeof(*alloc->pages);
	size_t i;

	/* Imported pages may have page private data already in use */
	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
		alloc_size += nr_pages * sizeof(*alloc->imported.user_buf.dma_addrs);
		per_page_size += sizeof(*alloc->imported.user_buf.dma_addrs);
	}

	/*
	 * Prevent nr_pages*per_page_size + sizeof(*alloc) from
	 * wrapping around.
	 */
	if (nr_pages > ((((size_t)-1) - sizeof(*alloc)) / per_page_size))
		return ERR_PTR(-ENOMEM);

	/* Allocate based on the size to reduce internal fragmentation of vmem */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc = vmalloc(alloc_size);
	else
		alloc = kmalloc(alloc_size, GFP_KERNEL);

	if (!alloc)
		return ERR_PTR(-ENOMEM);

	memset(alloc, 0, sizeof(struct kbase_mem_phy_alloc));

	if (type == KBASE_MEM_TYPE_NATIVE) {
		alloc->imported.native.nr_struct_pages = (alloc_size + (PAGE_SIZE - 1)) >>
							 PAGE_SHIFT;
		kbase_process_page_usage_inc(kctx, alloc->imported.native.nr_struct_pages);
	}

	/* Store allocation method */
	if (alloc_size > KBASE_MEM_PHY_ALLOC_LARGE_THRESHOLD)
		alloc->properties |= KBASE_MEM_PHY_ALLOC_LARGE;

	kref_init(&alloc->kref);
	atomic_set(&alloc->gpu_mappings, 0);
	atomic_set(&alloc->kernel_mappings, 0);
	alloc->nents = 0;
	alloc->pages = (void *)(alloc + 1);
	/* fill pages with invalid address value */
	for (i = 0; i < nr_pages; i++)
		alloc->pages[i] = as_tagged(KBASE_INVALID_PHYSICAL_ADDRESS);
	INIT_LIST_HEAD(&alloc->mappings);
	alloc->type = type;
	alloc->group_id = group_id;

	if (type == KBASE_MEM_TYPE_IMPORTED_USER_BUF)
		alloc->imported.user_buf.dma_addrs = (void *)(alloc->pages + nr_pages);

	return alloc;
}

static inline int kbase_reg_prepare_native(struct kbase_va_region *reg, struct kbase_context *kctx,
					   int group_id)
{
	KBASE_DEBUG_ASSERT(reg);
	KBASE_DEBUG_ASSERT(!reg->cpu_alloc);
	KBASE_DEBUG_ASSERT(!reg->gpu_alloc);
	KBASE_DEBUG_ASSERT(reg->flags & KBASE_REG_FREE);

	reg->cpu_alloc = kbase_alloc_create(kctx, reg->nr_pages, KBASE_MEM_TYPE_NATIVE, group_id);
	if (IS_ERR(reg->cpu_alloc))
		return PTR_ERR(reg->cpu_alloc);
	else if (!reg->cpu_alloc)
		return -ENOMEM;

	reg->cpu_alloc->imported.native.kctx = kctx;
	if (kbase_ctx_flag(kctx, KCTX_INFINITE_CACHE) && (reg->flags & KBASE_REG_CPU_CACHED)) {
		reg->gpu_alloc =
			kbase_alloc_create(kctx, reg->nr_pages, KBASE_MEM_TYPE_NATIVE, group_id);
		if (IS_ERR_OR_NULL(reg->gpu_alloc)) {
			kbase_mem_phy_alloc_put(reg->cpu_alloc);
			return -ENOMEM;
		}
		reg->gpu_alloc->imported.native.kctx = kctx;
	} else {
		reg->gpu_alloc = kbase_mem_phy_alloc_get(reg->cpu_alloc);
	}

	mutex_lock(&kctx->jit_evict_lock);
	INIT_LIST_HEAD(&reg->cpu_alloc->evict_node);
	INIT_LIST_HEAD(&reg->gpu_alloc->evict_node);
	mutex_unlock(&kctx->jit_evict_lock);

	reg->flags &= ~KBASE_REG_FREE;

	return 0;
}

/*
 * Max size for kbdev memory pool (in pages)
 */
#define KBASE_MEM_POOL_MAX_SIZE_KBDEV (SZ_64M >> PAGE_SHIFT)

/*
 * Max size for kctx memory pool (in pages)
 */
#define KBASE_MEM_POOL_MAX_SIZE_KCTX (SZ_64M >> PAGE_SHIFT)

/*
 * The order required for a 2MB page allocation (2^order * PAGE_SIZE = 2MB)
 */
#define KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER (__builtin_ffs(NUM_PAGES_IN_2MB_LARGE_PAGE) - 1)

/*
 * The order required for a small page allocation
 */
#define KBASE_MEM_POOL_SMALL_PAGE_TABLE_ORDER 0

/**
 * kbase_mem_pool_config_set_max_size - Set maximum number of free pages in
 *                                      initial configuration of a memory pool
 *
 * @config:   Initial configuration for a physical memory pool
 * @max_size: Maximum number of free pages that a pool created from
 *            @config can hold
 */
static inline void kbase_mem_pool_config_set_max_size(struct kbase_mem_pool_config *const config,
						      size_t const max_size)
{
	WRITE_ONCE(config->max_size, max_size);
}

/**
 * kbase_mem_pool_config_get_max_size - Get maximum number of free pages from
 *                                      initial configuration of a memory pool
 *
 * @config: Initial configuration for a physical memory pool
 *
 * Return: Maximum number of free pages that a pool created from @config
 *         can hold
 */
static inline size_t
kbase_mem_pool_config_get_max_size(const struct kbase_mem_pool_config *const config)
{
	return READ_ONCE(config->max_size);
}

/**
 * kbase_mem_pool_init - Create a memory pool for a kbase device
 * @pool:      Memory pool to initialize
 * @config:    Initial configuration for the memory pool
 * @order:     Page order for physical page size (order=0 => small page, order != 0 => 2MB)
 * @group_id:  A memory group ID to be passed to a platform-specific
 *             memory group manager, if present.
 *             Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 * @kbdev:     Kbase device where memory is used
 * @next_pool: Pointer to the next pool or NULL.
 *
 * Allocations from @pool are in whole pages. Each @pool has a free list where
 * pages can be quickly allocated from. The free list is initially empty and
 * filled whenever pages are freed back to the pool. The number of free pages
 * in the pool will in general not exceed @max_size, but the pool may in
 * certain corner cases grow above @max_size.
 *
 * If @next_pool is not NULL, we will allocate from @next_pool before going to
 * the memory group manager. Similarly pages can spill over to @next_pool when
 * @pool is full. Pages are zeroed before they spill over to another pool, to
 * prevent leaking information between applications.
 *
 * A shrinker is registered so that Linux mm can reclaim pages from the pool as
 * needed.
 *
 * Return: 0 on success, negative -errno on error
 */
int kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config,
			unsigned int order, int group_id, struct kbase_device *kbdev,
			struct kbase_mem_pool *next_pool);
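
/* Illustrative sketch, assuming a caller-owned pool, its config and a @kbdev
 * pointer: set the maximum size, initialise the pool with small pages, group 0
 * and no next_pool, then tear it down once it is no longer needed.
 *
 *   struct kbase_mem_pool_config config;
 *   struct kbase_mem_pool pool;
 *   int err;
 *
 *   kbase_mem_pool_config_set_max_size(&config, KBASE_MEM_POOL_MAX_SIZE_KCTX);
 *   err = kbase_mem_pool_init(&pool, &config,
 *                             KBASE_MEM_POOL_SMALL_PAGE_TABLE_ORDER,
 *                             0, kbdev, NULL);
 *   if (!err)
 *           kbase_mem_pool_term(&pool); // later, when the pool is unused
 */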

/**
 * kbase_mem_pool_term - Destroy a memory pool
 * @pool:  Memory pool to destroy
 *
 * Pages in the pool will spill over to @next_pool (if available) or be freed
 * to the kernel.
 */
void kbase_mem_pool_term(struct kbase_mem_pool *pool);

/**
 * kbase_mem_pool_alloc - Allocate a page from memory pool
 * @pool:  Memory pool to allocate from
 *
 * Allocations from the pool are made as follows:
 * 1. If there are free pages in the pool, allocate a page from @pool.
 * 2. Otherwise, if @next_pool is not NULL and has free pages, allocate a page
 *    from @next_pool.
 * 3. Return NULL if no memory in the pool
 *
 * Return: Pointer to allocated page, or NULL if allocation failed.
 *
 * Note: This function should not be used if the pool lock is held. Use
 * kbase_mem_pool_alloc_locked() instead.
 */
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool);

/**
 * kbase_mem_pool_alloc_locked - Allocate a page from memory pool
 * @pool:  Memory pool to allocate from
 *
 * If there are free pages in the pool, this function allocates a page from
 * @pool. This function does not use @next_pool.
 *
 * Return: Pointer to allocated page, or NULL if allocation failed.
 *
 * Note: Caller must hold the pool lock.
 */
struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool);

/**
 * kbase_mem_pool_free - Free a page to memory pool
 * @pool:  Memory pool where page should be freed
 * @page:  Page to free to the pool
 * @dirty: Whether some of the page may be dirty in the cache.
 *
 * Pages are freed to the pool as follows:
 * 1. If @pool is not full, add @page to @pool.
 * 2. Otherwise, if @next_pool is not NULL and not full, add @page to
 *    @next_pool.
 * 3. Finally, free @page to the kernel.
 *
 * Note: This function should not be used if the pool lock is held. Use
 * kbase_mem_pool_free_locked() instead.
 */
void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *page, bool dirty);
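
/* Illustrative sketch: single-page allocation and release through the pool,
 * called without the pool lock held. The page is assumed not to be dirty in
 * the cache when freed.
 *
 *   struct page *p = kbase_mem_pool_alloc(pool);
 *
 *   if (p) {
 *           // ... use the page ...
 *           kbase_mem_pool_free(pool, p, false);
 *   }
 */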

/**
 * kbase_mem_pool_free_locked - Free a page to memory pool
 * @pool:  Memory pool where page should be freed
 * @p:     Page to free to the pool
 * @dirty: Whether some of the page may be dirty in the cache.
 *
 * If @pool is not full, this function adds @p to @pool. Otherwise, @p is
 * freed to the kernel. This function does not use @next_pool.
 *
 * Note: Caller must hold the pool lock.
 */
void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p, bool dirty);

/**
 * kbase_mem_pool_alloc_pages - Allocate pages from memory pool
 * @pool:     Memory pool to allocate from
 * @nr_small_pages: Number of pages to allocate
 * @pages:    Pointer to array where the physical address of the allocated
 *            pages will be stored.
 * @partial_allowed: Whether allocating fewer pages than requested is allowed
 * @page_owner: Pointer to the task that created the Kbase context for which
 *              the pages are being allocated. It can be NULL if the pages
 *              won't be associated with any Kbase context.
 *
 * Like kbase_mem_pool_alloc() but optimized for allocating many pages.
 *
 * Return:
 * On success, the number of pages allocated (may be less than @nr_small_pages
 * if @partial_allowed).
 * On error, an error code.
 *
 * Note: This function should not be used if the pool lock is held. Use
 * kbase_mem_pool_alloc_pages_locked() instead.
 *
 * The caller must not hold vm_lock, as this could cause a deadlock if
 * the kernel OoM killer runs. If the caller must allocate pages while holding
 * this lock, it should use kbase_mem_pool_alloc_pages_locked() instead.
 */
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_small_pages,
			       struct tagged_addr *pages, bool partial_allowed,
			       struct task_struct *page_owner);

/**
 * kbase_mem_pool_alloc_pages_locked - Allocate pages from memory pool
 * @pool:        Memory pool to allocate from
 * @nr_small_pages: Number of pages to allocate
 * @pages:       Pointer to array where the physical address of the allocated
 *               pages will be stored.
 *
 * Like kbase_mem_pool_alloc() but optimized for allocating many pages. This
 * version does not allocate new pages from the kernel and therefore will never
 * trigger the OoM killer, so it can be run while the vm_lock is held.
 *
 * As new pages cannot be allocated, the caller must ensure there are
 * sufficient pages in the pool. Usage of this function should look like:
 *
 *   kbase_gpu_vm_lock(kctx);
 *   kbase_mem_pool_lock(pool);
 *   while (kbase_mem_pool_size(pool) < pages_required) {
 *     kbase_mem_pool_unlock(pool);
 *     kbase_gpu_vm_unlock(kctx);
 *     kbase_mem_pool_grow(pool);
 *     kbase_gpu_vm_lock(kctx);
 *     kbase_mem_pool_lock(pool);
 *   }
 *   kbase_mem_pool_alloc_pages_locked(pool);
 *   kbase_mem_pool_unlock(pool);
 *   Perform other processing that requires vm_lock...
 *   kbase_gpu_vm_unlock(kctx);
 *
 * This ensures that the pool can be grown to the required size and that the
 * allocation can complete without another thread using the newly grown pages.
 *
 * Return:
 * On success number of pages allocated.
 * On error an error code.
 *
 * Note: Caller must hold the pool lock.
 */
int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool, size_t nr_small_pages,
				      struct tagged_addr *pages);

/**
 * kbase_mem_pool_free_pages - Free pages to memory pool
 * @pool:     Memory pool where pages should be freed
 * @nr_pages: Number of pages to free
 * @pages:    Pointer to array holding the physical addresses of the pages to
 *            free.
 * @dirty:    Whether any pages may be dirty in the cache.
 * @reclaimed: Whether the pages were reclaimable and thus should bypass
 *             the pool and go straight to the kernel.
 *
 * Like kbase_mem_pool_free() but optimized for freeing many pages.
 */
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
			       struct tagged_addr *pages, bool dirty, bool reclaimed);

/**
 * kbase_mem_pool_free_pages_locked - Free pages to memory pool
 * @pool:     Memory pool where pages should be freed
 * @nr_pages: Number of pages to free
 * @pages:    Pointer to array holding the physical addresses of the pages to
 *            free.
 * @dirty:    Whether any pages may be dirty in the cache.
 * @reclaimed: Whether the pages were reclaimable and thus should bypass
 *             the pool and go straight to the kernel.
 *
 * Like kbase_mem_pool_free() but optimized for freeing many pages.
 */
void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool, size_t nr_pages,
				      struct tagged_addr *pages, bool dirty, bool reclaimed);

/**
 * kbase_mem_pool_size - Get number of free pages in memory pool
 * @pool:  Memory pool to inspect
 *
 * Note: the size of the pool may in certain corner cases exceed @max_size!
 *
 * Return: Number of free pages in the pool
 */
static inline size_t kbase_mem_pool_size(struct kbase_mem_pool *pool)
{
	return READ_ONCE(pool->cur_size);
}

/**
 * kbase_mem_pool_max_size - Get maximum number of free pages in memory pool
 * @pool:  Memory pool to inspect
 *
 * Return: Maximum number of free pages in the pool
 */
static inline size_t kbase_mem_pool_max_size(struct kbase_mem_pool *pool)
{
	return pool->max_size;
}

/**
 * kbase_mem_pool_set_max_size - Set maximum number of free pages in memory pool
 * @pool:     Memory pool to inspect
 * @max_size: Maximum number of free pages the pool can hold
 *
 * If @max_size is reduced, the pool will be shrunk to adhere to the new limit.
 * For details see kbase_mem_pool_shrink().
 */
void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size);

/**
 * kbase_mem_pool_grow - Grow the pool
 * @pool:       Memory pool to grow
 * @nr_to_grow: Number of pages to add to the pool
 * @page_owner: Pointer to the task that created the Kbase context for which
 *              the memory pool is being grown. It can be NULL if the pages
 *              to be allocated won't be associated with any Kbase context.
 *
 * Adds @nr_to_grow pages to the pool. Note that this may cause the pool to
 * become larger than the maximum size specified.
 *
 * Return: 0 on success, -ENOMEM if unable to allocate sufficient pages or
 * -EPERM if the allocation of pages is not permitted due to the process exit
 * or context termination.
 */
int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow,
			struct task_struct *page_owner);

/**
 * kbase_mem_pool_trim - Grow or shrink the pool to a new size
 * @pool:     Memory pool to trim
 * @new_size: New number of pages in the pool
 *
 * If @new_size > @cur_size, fill the pool with new pages from the kernel, but
 * not above the max_size for the pool.
 * If @new_size < @cur_size, shrink the pool by freeing pages to the kernel.
 */
void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size);

/**
 * kbase_mem_pool_mark_dying - Mark that this pool is dying
 * @pool:     Memory pool
 *
 * This will cause any ongoing allocation operations (eg growing on page fault)
 * to be terminated.
 */
void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool);

/**
 * kbase_mem_alloc_page - Allocate a new page for a device
 * @pool:  Memory pool to allocate a page from
 * @alloc_from_kthread:  Flag indicating that the current thread is a kernel thread.
 *
 * Most uses should use kbase_mem_pool_alloc to allocate a page. However,
 * that function can fail if the pool is empty.
 *
 * Return: A new page or NULL if no memory
 */
struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool, const bool alloc_from_kthread);

/**
 * kbase_mem_pool_free_page - Free a page from a memory pool.
 * @pool:  Memory pool to free a page from
 * @p:     Page to free
 *
 * This will free any associated data stored for the page and release
 * the page back to the kernel.
 */
void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p);

bool kbase_check_alloc_flags(unsigned long flags);
bool kbase_check_import_flags(unsigned long flags);

static inline bool kbase_import_size_is_valid(struct kbase_device *kbdev, u64 va_pages)
{
	if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
		dev_dbg(kbdev->dev,
			"Import attempted with va_pages==%llu larger than KBASE_MEM_ALLOC_MAX_SIZE!",
			(unsigned long long)va_pages);
		return false;
	}

	return true;
}

static inline bool kbase_alias_size_is_valid(struct kbase_device *kbdev, u64 va_pages)
{
	if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
		dev_dbg(kbdev->dev,
			"Alias attempted with va_pages==%llu larger than KBASE_MEM_ALLOC_MAX_SIZE!",
			(unsigned long long)va_pages);
		return false;
	}

	return true;
}

/**
 * kbase_check_alloc_sizes - check user space sizes parameters for an
 *                           allocation
 *
 * @kctx:         kbase context
 * @flags:        The flags passed from user space
 * @va_pages:     The size of the requested region, in pages.
 * @commit_pages: Number of pages to commit initially.
 * @extension:    Number of pages to grow by on GPU page fault and/or alignment
 *                (depending on flags)
 *
 * Makes checks on the size parameters passed in from user space for a memory
 * allocation call, with respect to the flags requested.
 *
 * Return: 0 if sizes are valid for these flags, negative error code otherwise
 */
int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags, u64 va_pages,
			    u64 commit_pages, u64 extension);

/**
 * kbase_update_region_flags - Convert user space flags to kernel region flags
 *
 * @kctx:  kbase context
 * @reg:   The region to update the flags on
 * @flags: The flags passed from user space
 *
 * The user space flag BASE_MEM_COHERENT_SYSTEM_REQUIRED will be rejected and
 * this function will fail if the system does not support system coherency.
 *
 * Return: 0 if successful, -EINVAL if the flags are not supported
 */
int kbase_update_region_flags(struct kbase_context *kctx, struct kbase_va_region *reg,
			      unsigned long flags);

/**
 * kbase_gpu_vm_lock() - Acquire the per-context region list lock
 * @kctx:  KBase context
 *
 * Care must be taken when making an allocation whilst holding this lock, because of interaction
 * with the Kernel's OoM-killer and use of this lock in &vm_operations_struct close() handlers.
 *
 * If this lock is taken during a syscall, and/or the allocation is 'small' then it is safe to use.
 *
 * If the caller is not in a syscall, and the allocation is 'large', then it must not hold this
 * lock.
 *
 * This is because the kernel OoM killer might target the process corresponding to that same kbase
 * context, and attempt to call the context's close() handlers for its open VMAs. This is safe if
 * the allocating caller is in a syscall, because the VMA close() handlers are delayed until all
 * syscalls have finished (noting that no new syscalls can start as the remaining user threads will
 * have been killed too), and so there is no possibility of contention between the thread
 * allocating with this lock held, and the VMA close() handler.
 *
 * However, outside of a syscall (e.g. a kworker or other kthread), one of kbase's VMA close()
 * handlers (kbase_cpu_vm_close()) also takes this lock, and so prevents the process from being
 * killed until the caller of the function allocating memory has released this lock. On subsequent
 * retries for allocating a page, the OoM killer would be re-invoked but skips over the process
 * stuck in its close() handler.
 *
 * Also because the caller is not in a syscall, the page allocation code in the kernel is not aware
 * that the allocation is being done on behalf of another process, and so does not realize that
 * process has received a kill signal due to an OoM, and so will continually retry with the OoM
 * killer until enough memory has been released, or until all other killable processes have been
 * killed (at which point the kernel halts with a panic).
 *
 * However, if the allocation outside of a syscall is small enough to be satisfied by killing
 * another process, then the allocation completes, the caller releases this lock, and
 * kbase_cpu_vm_close() can unblock and allow the process to be killed.
 *
 * Hence, this is effectively a deadlock with kbase_cpu_vm_close(), except that if the memory
 * allocation is small enough the deadlock can be resolved. For that reason, such a memory deadlock
 * is NOT discovered with CONFIG_PROVE_LOCKING.
 *
 * If this may be called outside of a syscall, consider moving allocations outside of this lock, or
 * use __GFP_NORETRY for such allocations (which will allow direct-reclaim attempts, but will
 * prevent OoM kills to satisfy the allocation, and will just fail the allocation instead).
 */
void kbase_gpu_vm_lock(struct kbase_context *kctx);
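
/*
 * Illustrative sketch (not part of the original documentation) of the
 * __GFP_NORETRY approach suggested above, for a caller that may run outside
 * of a syscall (e.g. from a kworker): the allocation simply fails instead of
 * risking the effective deadlock described above. The buffer and its size
 * are hypothetical.
 *
 *   kbase_gpu_vm_lock(kctx);
 *   buf = kzalloc(buf_size, GFP_KERNEL | __GFP_NORETRY);
 *   if (!buf) {
 *           kbase_gpu_vm_unlock(kctx);
 *           return -ENOMEM;
 *   }
 *   ...
 *   kbase_gpu_vm_unlock(kctx);
 */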

/**
 * kbase_gpu_vm_lock_with_pmode_sync() - Wrapper of kbase_gpu_vm_lock.
 * @kctx:  KBase context
 *
 * Same as kbase_gpu_vm_lock() for JM GPUs.
 * Additionally acquires the P.mode read-write semaphore for CSF GPUs.
 */
void kbase_gpu_vm_lock_with_pmode_sync(struct kbase_context *kctx);

/**
 * kbase_gpu_vm_unlock() - Release the per-context region list lock
 * @kctx:  KBase context
 */
void kbase_gpu_vm_unlock(struct kbase_context *kctx);

/**
 * kbase_gpu_vm_unlock_with_pmode_sync() - Wrapper of kbase_gpu_vm_unlock.
 * @kctx:  KBase context
 *
 * Same as kbase_gpu_vm_unlock() for JM GPUs.
 * Additionally releases the P.mode read-write semaphore for CSF GPUs.
 */
void kbase_gpu_vm_unlock_with_pmode_sync(struct kbase_context *kctx);

int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size);

/**
 * kbase_gpu_mmap - Register region and map it on the GPU.
 *
 * @kctx: kbase context containing the region
 * @reg: the region to add
 * @addr: the address to insert the region at
 * @nr_pages: the number of pages in the region
 * @align: the minimum alignment in pages
 * @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
 *
 * Call kbase_add_va_region() and map the region on the GPU.
 *
 * Return: 0 on success, error code otherwise.
 */
int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr,
		   size_t nr_pages, size_t align, enum kbase_caller_mmu_sync_info mmu_sync_info);

/**
 * kbase_gpu_munmap - Remove the region from the GPU and unregister it.
 *
 * @kctx:  KBase context
 * @reg:   The region to remove
 *
 * Must be called with context lock held.
 *
 * Return: 0 on success, error code otherwise.
 */
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_mmu_update - Configure an address space on the GPU to the specified
 *                    MMU tables
 *
 * @kbdev: Kbase device structure
 * @mmut:  The set of MMU tables to be configured on the address space
 * @as_nr: The address space to be configured
 *
 * The caller has the following locking conditions:
 * - It must hold kbase_device->mmu_hw_mutex
 * - It must hold the hwaccess_lock
 */
void kbase_mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, int as_nr);

/**
 * kbase_mmu_disable() - Disable the MMU for a previously active kbase context.
 * @kctx:	Kbase context
 *
 * Disable the MMU and perform the required cache maintenance to remove all
 * data belonging to the provided kbase context from the GPU caches.
 *
 * The caller has the following locking conditions:
 * - It must hold kbase_device->mmu_hw_mutex
 * - It must hold the hwaccess_lock
 */
void kbase_mmu_disable(struct kbase_context *kctx);

/**
 * kbase_mmu_disable_as() - Set the MMU to unmapped mode for the specified
 * address space.
 * @kbdev:	Kbase device
 * @as_nr:	The address space number to set to unmapped.
 *
 * This function must only be called during reset/power-up; it is used to
 * ensure the registers are in a known state.
 *
 * The caller must hold kbdev->mmu_hw_mutex.
 */
void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);

void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);

#if defined(CONFIG_MALI_VECTOR_DUMP)
/**
 * kbase_mmu_dump() - Dump the MMU tables to a buffer.
 *
 * @kctx:        The kbase context to dump
 * @nr_pages:    The number of pages to allocate for the buffer.
 *
 * This function allocates a buffer (of @c nr_pages pages) to hold a dump
 * of the MMU tables and fills it. If the buffer is too small
 * then the return value will be NULL.
 *
 * The GPU vm lock must be held when calling this function.
 *
 * The buffer returned should be freed with @ref vfree when it is no longer
 * required.
 *
 * Return: The address of the buffer containing the MMU dump or NULL on error
 * (including if the @c nr_pages is too small)
 */
void *kbase_mmu_dump(struct kbase_context *kctx, size_t nr_pages);
#endif
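
/*
 * Illustrative sketch (not part of the original documentation) of the
 * locking and cleanup requirements of kbase_mmu_dump() described above;
 * nr_dump_pages and the way the dump is consumed are hypothetical.
 *
 *   kbase_gpu_vm_lock(kctx);
 *   buf = kbase_mmu_dump(kctx, nr_dump_pages);
 *   kbase_gpu_vm_unlock(kctx);
 *
 *   if (buf) {
 *           ... consume the dump ...
 *           vfree(buf);
 *   }
 */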

/**
 * kbase_sync_now - Perform cache maintenance on a memory region
 *
 * @kctx: The kbase context of the region
 * @sset: A syncset structure describing the region and direction of the
 *        synchronisation required
 *
 * Return: 0 on success or error code
 */
int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset);
void kbase_sync_single(struct kbase_context *kctx, struct tagged_addr cpu_pa,
		       struct tagged_addr gpu_pa, off_t offset, size_t size,
		       enum kbase_sync_type sync_fn);

/* OS specific functions */
int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr);
int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *reg);
void kbase_os_mem_map_lock(struct kbase_context *kctx);
void kbase_os_mem_map_unlock(struct kbase_context *kctx);

/**
 * kbasep_os_process_page_usage_update() - Update the memory allocation
 *                                         counters for the current process.
 *
 * @kctx:  The kbase context
 * @pages: The desired delta to apply to the memory usage counters.
 *
 * OS specific call to update the current memory allocation counters
 * for the current process with the supplied delta.
 */
void kbasep_os_process_page_usage_update(struct kbase_context *kctx, int pages);

/**
 * kbase_process_page_usage_inc() - Add to the memory allocation counters for
 *                                  the current process
 *
 * @kctx:  The kernel base context used for the allocation.
 * @pages: The desired delta to apply to the memory usage counters.
 *
 * OS specific call to add to the current memory allocation counters for
 * the current process by the supplied amount.
 */
static inline void kbase_process_page_usage_inc(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, pages);
}

/**
 * kbase_process_page_usage_dec() - Subtract from the memory allocation
 *                                  counters for the current process.
 *
 * @kctx:  The kernel base context used for the allocation.
 * @pages: The desired delta to apply to the memory usage counters.
 *
 * OS specific call to subtract from the current memory allocation counters
 * for the current process by the supplied amount.
 */
static inline void kbase_process_page_usage_dec(struct kbase_context *kctx, int pages)
{
	kbasep_os_process_page_usage_update(kctx, 0 - pages);
}
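
/*
 * Illustrative sketch (not part of the original documentation) of a typical
 * balanced use of the two helpers above around the lifetime of an
 * allocation; nr_pages_accounted is hypothetical.
 *
 *   kbase_process_page_usage_inc(kctx, nr_pages_accounted);
 *   ... allocation is in use by the process ...
 *   kbase_process_page_usage_dec(kctx, nr_pages_accounted);
 */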

/**
 * kbasep_find_enclosing_cpu_mapping_offset() - Find the offset of the CPU
 * mapping of a memory allocation containing a given address range
 *
 * @kctx:      The kernel base context used for the allocation.
 * @uaddr:     Start of the CPU virtual address range.
 * @size:      Size of the CPU virtual address range (in bytes).
 * @offset:    The offset from the start of the allocation to the specified CPU
 *             virtual address.
 *
 * Searches for a CPU mapping of any part of any region that fully encloses the
 * CPU virtual address range specified by @uaddr and @size. Returns a failure
 * indication if only part of the address range lies within a CPU mapping.
 *
 * Return: 0 if offset was obtained successfully. Error code otherwise.
 */
int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx, unsigned long uaddr,
					     size_t size, u64 *offset);

/**
 * kbasep_find_enclosing_gpu_mapping_start_and_offset() - Find the address of
 * the start of GPU virtual memory region which encloses @gpu_addr for the
 * @size length in bytes
 *
 * @kctx:	The kernel base context within which the memory is searched.
 * @gpu_addr:	GPU virtual address for which the region is sought; defines
 *              the beginning of the provided region.
 * @size:       The length (in bytes) of the provided region for which the
 *              GPU virtual memory region is sought.
 * @start:      Pointer to the location where the address of the start of
 *              the found GPU virtual memory region is.
 * @offset:     Pointer to the location where the offset of @gpu_addr into
 *              the found GPU virtual memory region is.
 *
 * Searches for the memory region in GPU virtual memory space which contains
 * the region defined by the @gpu_addr and @size, where @gpu_addr is the
 * beginning and @size the length in bytes of the provided region. If found,
 * the location of the start address of the GPU virtual memory region is
 * passed in @start pointer and the location of the offset of the region into
 * the GPU virtual memory region is passed in @offset pointer.
 *
 * Return: 0 on success, error code otherwise.
 */
int kbasep_find_enclosing_gpu_mapping_start_and_offset(struct kbase_context *kctx, u64 gpu_addr,
						       size_t size, u64 *start, u64 *offset);

/**
 * kbase_alloc_phy_pages_helper - Allocates physical pages.
 * @alloc:              allocation object to add pages to
 * @nr_pages_requested: number of physical pages to allocate
 *
 * Allocates @nr_pages_requested and updates the alloc object.
 *
 * Note: if kbase_gpu_vm_lock() is to be held around this function to ensure thread-safe updating
 * of @alloc, then refer to the documentation of kbase_gpu_vm_lock() about the requirements of
 * either calling during a syscall, or ensuring the allocation is small. These requirements prevent
 * an effective deadlock between the kernel's OoM killer and kbase's VMA close() handlers, which
 * could take kbase_gpu_vm_lock() too.
 *
 * If the requirements of kbase_gpu_vm_lock() cannot be satisfied when calling this function, but
 * @alloc must still be updated in a thread-safe way, then instead use
 * kbase_alloc_phy_pages_helper_locked() and restructure callers into the sequence outlined there.
 *
 * This function cannot be used from interrupt context
 *
 * Return: 0 if all pages have been successfully allocated. Error code otherwise
 */
int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested);

/**
 * kbase_alloc_phy_pages_helper_locked - Allocates physical pages.
 * @alloc:              allocation object to add pages to
 * @pool:               Memory pool to allocate from
 * @nr_pages_requested: number of physical pages to allocate
 * @prealloc_sa:        Information about the partial allocation if the amount of memory requested
 *                      is not a multiple of 2MB. One instance of struct kbase_sub_alloc must be
 *                      allocated by the caller if kbdev->pagesize_2mb is enabled.
 *
 * Allocates @nr_pages_requested and updates the alloc object. This function does not allocate new
 * pages from the kernel, and therefore will never trigger the OoM killer. Therefore, it can be
 * called whilst a thread operating outside of a syscall has held the region list lock
 * (kbase_gpu_vm_lock()), as it will not cause an effective deadlock with VMA close() handlers used
 * by the OoM killer.
 *
 * As new pages can not be allocated, the caller must ensure there are sufficient pages in the
 * pool. Usage of this function should look like:
 *
 *   kbase_gpu_vm_lock(kctx);
 *   kbase_mem_pool_lock(pool)
 *   while (kbase_mem_pool_size(pool) < pages_required) {
 *     kbase_mem_pool_unlock(pool)
 *     kbase_gpu_vm_unlock(kctx);
 *     kbase_mem_pool_grow(pool)
 *     kbase_gpu_vm_lock(kctx);
 *     kbase_mem_pool_lock(pool)
 *   }
 *   kbase_alloc_phy_pages_helper_locked(pool)
 *   kbase_mem_pool_unlock(pool)
 *   // Perform other processing that requires vm_lock...
 *   kbase_gpu_vm_unlock(kctx);
 *
 * This ensures that the pool can be grown to the required size and that the allocation can
 * complete without another thread using the newly grown pages.
 *
 * If kbdev->pagesize_2mb is enabled and the allocation is >= 2MB, then @pool must be one of the
 * pools from alloc->imported.native.kctx->mem_pools.large[]. Otherwise it must be one of the
 * mempools from alloc->imported.native.kctx->mem_pools.small[].
 *
 * @prealloc_sa is used to manage the non-2MB sub-allocation. It has to be pre-allocated because
 * kmalloc() may sleep and sleeping is not allowed whilst holding pool->pool_lock. @prealloc_sa
 * shall be set to NULL if it has been consumed by this function to indicate that the caller no
 * longer owns it and should not access it further.
 *
 * Note: Caller must hold @pool->pool_lock
 *
 * Return: Pointer to array of allocated pages. NULL on failure.
 */
struct tagged_addr *kbase_alloc_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
							struct kbase_mem_pool *pool,
							size_t nr_pages_requested,
							struct kbase_sub_alloc **prealloc_sa);
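
/*
 * Illustrative sketch (not part of the original documentation) of the
 * @prealloc_sa handling described above; everything other than the kbase
 * functions documented in this file is hypothetical. Freeing @prealloc_sa
 * unconditionally is safe because the helper sets it to NULL once consumed
 * and kfree(NULL) is a no-op.
 *
 *   struct kbase_sub_alloc *prealloc_sa = NULL;
 *   struct tagged_addr *new_pages;
 *
 *   if (kbdev->pagesize_2mb) {
 *           prealloc_sa = kmalloc(sizeof(*prealloc_sa), GFP_KERNEL);
 *           if (!prealloc_sa)
 *                   return -ENOMEM;
 *   }
 *
 *   kbase_mem_pool_lock(pool);
 *   new_pages = kbase_alloc_phy_pages_helper_locked(alloc, pool, nr_pages,
 *                                                   &prealloc_sa);
 *   kbase_mem_pool_unlock(pool);
 *
 *   kfree(prealloc_sa);
 */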

/**
 * kbase_free_phy_pages_helper() - Free physical pages.
 *
 * @alloc:            allocation object to free pages from
 * @nr_pages_to_free: number of physical pages to free
 *
 * Free @nr_pages_to_free pages and updates the alloc object.
 *
 * Return: 0 on success, otherwise a negative error code
 */
int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free);

/**
 * kbase_free_phy_pages_helper_locked - Free pages allocated with
 *                                      kbase_alloc_phy_pages_helper_locked()
 * @alloc:            Allocation object to free pages from
 * @pool:             Memory pool to return freed pages to
 * @pages:            Pages allocated by kbase_alloc_phy_pages_helper_locked()
 * @nr_pages_to_free: Number of physical pages to free
 *
 * This function atomically frees pages allocated with
 * kbase_alloc_phy_pages_helper_locked(). @pages is the pointer to the page
 * array that is returned by that function. @pool must be the pool that the
 * pages were originally allocated from.
 *
 * If the mem_pool has been unlocked since the allocation then
 * kbase_free_phy_pages_helper() should be used instead.
 */
void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
					struct kbase_mem_pool *pool, struct tagged_addr *pages,
					size_t nr_pages_to_free);

static inline void kbase_set_dma_addr_as_priv(struct page *p, dma_addr_t dma_addr)
{
	SetPagePrivate(p);
	if (sizeof(dma_addr_t) > sizeof(p->private)) {
		/* on 32-bit ARM with LPAE dma_addr_t becomes larger, but the
		 * private field stays the same. So we have to be clever and
		 * use the fact that we only store DMA addresses of whole pages,
		 * so the low bits should be zero
		 */
		KBASE_DEBUG_ASSERT(!(dma_addr & (PAGE_SIZE - 1)));
		set_page_private(p, dma_addr >> PAGE_SHIFT);
	} else {
		set_page_private(p, dma_addr);
	}
}

static inline dma_addr_t kbase_dma_addr_as_priv(struct page *p)
{
	if (sizeof(dma_addr_t) > sizeof(p->private))
		return ((dma_addr_t)page_private(p)) << PAGE_SHIFT;

	return (dma_addr_t)page_private(p);
}

static inline void kbase_clear_dma_addr_as_priv(struct page *p)
{
	ClearPagePrivate(p);
}

static inline struct kbase_page_metadata *kbase_page_private(struct page *p)
{
	return (struct kbase_page_metadata *)page_private(p);
}

static inline dma_addr_t kbase_dma_addr(struct page *p)
{
	if (kbase_is_page_migration_enabled())
		return kbase_page_private(p)->dma_addr;

	return kbase_dma_addr_as_priv(p);
}

static inline dma_addr_t kbase_dma_addr_from_tagged(struct tagged_addr tagged_pa)
{
	phys_addr_t pa = as_phys_addr_t(tagged_pa);
	struct page *page = pfn_to_page(PFN_DOWN(pa));
	dma_addr_t dma_addr = (is_huge(tagged_pa) || is_partial(tagged_pa)) ?
					    kbase_dma_addr_as_priv(page) :
					    kbase_dma_addr(page);

	return dma_addr;
}

/**
 * kbase_flush_mmu_wqs() - Flush MMU workqueues.
 * @kbdev:   Device pointer.
 *
 * This function will cause any outstanding page or bus faults to be processed.
 * It should be called prior to powering off the GPU.
 */
void kbase_flush_mmu_wqs(struct kbase_device *kbdev);

/**
 * kbase_sync_single_for_device - update physical memory and give GPU ownership
 * @kbdev: Device pointer
 * @handle: DMA address of region
 * @size: Size of region to sync
 * @dir:  DMA data direction
 */
void kbase_sync_single_for_device(struct kbase_device *kbdev, dma_addr_t handle, size_t size,
				  enum dma_data_direction dir);

/**
 * kbase_sync_single_for_cpu - update physical memory and give CPU ownership
 * @kbdev: Device pointer
 * @handle: DMA address of region
 * @size: Size of region to sync
 * @dir:  DMA data direction
 */
void kbase_sync_single_for_cpu(struct kbase_device *kbdev, dma_addr_t handle, size_t size,
			       enum dma_data_direction dir);
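
/*
 * Illustrative sketch (not part of the original documentation) of the usual
 * ownership hand-over between CPU and GPU using the two helpers above; the
 * DMA handle, the size and the CPU-side update are hypothetical.
 *
 *   kbase_sync_single_for_cpu(kbdev, dma_handle, size, DMA_BIDIRECTIONAL);
 *   ... CPU reads and/or updates the memory ...
 *   kbase_sync_single_for_device(kbdev, dma_handle, size, DMA_BIDIRECTIONAL);
 *   ... the GPU may access the memory again ...
 */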

#if IS_ENABLED(CONFIG_DEBUG_FS)
/**
 * kbase_jit_debugfs_init - Add per context debugfs entry for JIT.
 * @kctx: kbase context
 */
void kbase_jit_debugfs_init(struct kbase_context *kctx);
#endif /* CONFIG_DEBUG_FS */

/**
 * kbase_jit_init - Initialize the JIT memory pool management
 * @kctx: kbase context
 *
 * This function must be called only when a kbase context is instantiated.
 *
 * Return: zero on success or negative error number on failure.
 */
int kbase_jit_init(struct kbase_context *kctx);

/**
 * kbase_jit_allocate - Allocate JIT memory
 * @kctx: kbase context
 * @info: JIT allocation information
 * @ignore_pressure_limit: Whether the JIT memory pressure limit is ignored
 *
 * Return: JIT allocation on success or NULL on failure.
 */
struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
					   const struct base_jit_alloc_info *info,
					   bool ignore_pressure_limit);

/**
 * kbase_jit_free - Free a JIT allocation
 * @kctx: kbase context
 * @reg: JIT allocation
 *
 * Frees a JIT allocation and places it into the free pool for later reuse.
 */
void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_jit_backing_lost - Inform JIT that an allocation has lost backing
 * @reg: JIT allocation
 */
void kbase_jit_backing_lost(struct kbase_va_region *reg);

/**
 * kbase_jit_evict - Evict a JIT allocation from the pool
 * @kctx: kbase context
 *
 * Evict the least recently used JIT allocation from the pool. This can be
 * required if normal VA allocations are failing due to VA exhaustion.
 *
 * Return: True if a JIT allocation was freed, false otherwise.
 */
bool kbase_jit_evict(struct kbase_context *kctx);

/**
 * kbase_jit_term - Terminate the JIT memory pool management
 * @kctx: kbase context
 */
void kbase_jit_term(struct kbase_context *kctx);

#if MALI_JIT_PRESSURE_LIMIT_BASE
/**
 * kbase_trace_jit_report_gpu_mem_trace_enabled - variant of
 * kbase_trace_jit_report_gpu_mem() that should only be called once the
 * corresponding tracepoint is verified to be enabled
 * @kctx: kbase context
 * @reg:  Just-in-time memory region to trace
 * @flags: combination of values from enum kbase_jit_report_flags
 */
void kbase_trace_jit_report_gpu_mem_trace_enabled(struct kbase_context *kctx,
						  struct kbase_va_region *reg, unsigned int flags);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */

/**
 * kbase_trace_jit_report_gpu_mem - Trace information about the GPU memory used
 * to make a JIT report
 * @kctx: kbase context
 * @reg:  Just-in-time memory region to trace
 * @flags: combination of values from enum kbase_jit_report_flags
 *
 * Information is traced using the trace_mali_jit_report_gpu_mem() tracepoint.
 *
 * In case that tracepoint is not enabled, this function should have the same
 * low overheads as a tracepoint itself (i.e. use of 'jump labels' to avoid
 * conditional branches)
 *
 * This can take the reg_lock on @kctx, so do not use it in places where this
 * lock is already held.
 *
 * Note: this has to be a macro because at this stage the tracepoints have not
 * been included. Making it a macro also leaves no opportunity for the compiler
 * to mishandle inlining it.
 */
#if MALI_JIT_PRESSURE_LIMIT_BASE
#define kbase_trace_jit_report_gpu_mem(kctx, reg, flags)                                      \
	do {                                                                                  \
		if (trace_mali_jit_report_gpu_mem_enabled())                                  \
			kbase_trace_jit_report_gpu_mem_trace_enabled((kctx), (reg), (flags)); \
	} while (0)
#else
#define kbase_trace_jit_report_gpu_mem(kctx, reg, flags) CSTD_NOP(kctx, reg, flags)
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */

#if MALI_JIT_PRESSURE_LIMIT_BASE
/**
 * kbase_jit_report_update_pressure - safely update the JIT physical page
 * pressure and JIT region's estimate of used_pages
 * @kctx: kbase context, to update the current physical pressure
 * @reg:  Just-in-time memory region to update with @new_used_pages
 * @new_used_pages: new value of number of pages used in the JIT region
 * @flags: combination of values from enum kbase_jit_report_flags
 *
 * Takes care of:
 * - correctly updating the pressure given the current reg->used_pages and
 * new_used_pages
 * - then updating the %kbase_va_region used_pages member
 *
 * Precondition:
 * - new_used_pages <= reg->nr_pages
 */
void kbase_jit_report_update_pressure(struct kbase_context *kctx, struct kbase_va_region *reg,
				      u64 new_used_pages, unsigned int flags);

/**
 * kbase_jit_trim_necessary_pages() - calculate and trim the least pages
 * possible to satisfy a new JIT allocation
 *
 * @kctx: Pointer to the kbase context
 * @needed_pages: Number of JIT physical pages by which trimming is requested.
 *                The actual number of pages trimmed could differ.
 *
 * Before allocating a new just-in-time memory region or reusing a previous
 * one, ensure that the total JIT physical page usage also will not exceed the
 * pressure limit.
 *
 * If there are no reported-on allocations, then we already guarantee this will
 * be the case - because our current pressure then only comes from the va_pages
 * of each JIT region, hence JIT physical page usage is guaranteed to be
 * bounded by this.
 *
 * However as soon as JIT allocations become "reported on", the pressure is
 * lowered to allow new JIT regions to be allocated. It is after such a point
 * that the total JIT physical page usage could (either now or in the future on
 * a grow-on-GPU-page-fault) exceed the pressure limit, but only on newly
 * allocated JIT regions. Hence, trim any "reported on" regions.
 *
 * Any pages freed will go into the pool and be allocated from there in
 * kbase_mem_alloc().
 */
void kbase_jit_trim_necessary_pages(struct kbase_context *kctx, size_t needed_pages);

/*
 * Same as kbase_jit_request_phys_increase(), except that the caller is also
 * supposed to take jit_evict_lock on @kctx before calling this function.
 */
static inline void kbase_jit_request_phys_increase_locked(struct kbase_context *kctx,
							  size_t needed_pages)
{
#if !MALI_USE_CSF
	lockdep_assert_held(&kctx->jctx.lock);
#endif /* !MALI_USE_CSF */
	lockdep_assert_held(&kctx->reg_lock);
	lockdep_assert_held(&kctx->jit_evict_lock);

	kctx->jit_phys_pages_to_be_allocated += needed_pages;

	kbase_jit_trim_necessary_pages(kctx, kctx->jit_phys_pages_to_be_allocated);
}

/**
 * kbase_jit_request_phys_increase() - Increment the backing pages count and do
 * the required trimming before allocating pages for a JIT allocation.
 *
 * @kctx: Pointer to the kbase context
 * @needed_pages: Number of pages to be allocated for the JIT allocation.
 *
 * This function needs to be called before allocating backing pages for a
 * just-in-time memory region. The backing pages are currently allocated when,
 *
 * - A new JIT region is created.
 * - An old JIT region is reused from the cached pool.
 * - GPU page fault occurs for the active JIT region.
 * - Backing is grown for the JIT region through the commit ioctl.
 *
 * This function would ensure that the total JIT physical page usage does not
 * exceed the pressure limit even when the backing pages get allocated
 * simultaneously for multiple JIT allocations from different threads.
 *
 * There should be a matching call to kbase_jit_done_phys_increase(), after
 * the pages have been allocated and accounted against the active JIT
 * allocation.
 *
 * Caller is supposed to take reg_lock on @kctx before calling this function.
 */
static inline void kbase_jit_request_phys_increase(struct kbase_context *kctx, size_t needed_pages)
{
#if !MALI_USE_CSF
	lockdep_assert_held(&kctx->jctx.lock);
#endif /* !MALI_USE_CSF */
	lockdep_assert_held(&kctx->reg_lock);

	mutex_lock(&kctx->jit_evict_lock);
	kbase_jit_request_phys_increase_locked(kctx, needed_pages);
	mutex_unlock(&kctx->jit_evict_lock);
}

/**
 * kbase_jit_done_phys_increase() - Decrement the backing pages count after the
 * allocation of pages for a JIT allocation.
 *
 * @kctx: Pointer to the kbase context
 * @needed_pages: Number of pages that were allocated for the JIT allocation.
 *
 * This function should be called after backing pages have been allocated and
 * accounted against the active JIT allocation.
 * The call should be made once all of the following have been satisfied:
 * - the allocation is on the jit_active_head.
 * - the additional needed_pages have been allocated.
 * - kctx->reg_lock was held during the above and has not yet been unlocked.
 * Failing to call this function before unlocking kctx->reg_lock, once any of
 * the above have changed, may result in over-accounting the memory. Calling it
 * at the right time ensures kbase_jit_trim_necessary_pages() gets a consistent
 * count of the memory.
 *
 * A matching call to kbase_jit_request_phys_increase() should have been made,
 * before the allocation of backing pages.
 *
 * Caller is supposed to take reg_lock on @kctx before calling this function.
 */
static inline void kbase_jit_done_phys_increase(struct kbase_context *kctx, size_t needed_pages)
{
	lockdep_assert_held(&kctx->reg_lock);

	WARN_ON(kctx->jit_phys_pages_to_be_allocated < needed_pages);

	kctx->jit_phys_pages_to_be_allocated -= needed_pages;
}
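
/*
 * Illustrative sketch (not part of the original documentation) of the
 * request/done pairing described above around the allocation of backing
 * pages for a JIT region. The allocation step is hypothetical, it is assumed
 * that kbase_gpu_vm_lock() is how the caller holds kctx->reg_lock here, and
 * any additional locks required on some configurations are omitted.
 *
 *   kbase_gpu_vm_lock(kctx);
 *   kbase_jit_request_phys_increase(kctx, needed_pages);
 *   ... allocate and account the backing pages for the JIT region ...
 *   kbase_jit_done_phys_increase(kctx, needed_pages);
 *   kbase_gpu_vm_unlock(kctx);
 */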
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */

/**
 * kbase_has_exec_va_zone - EXEC_VA zone predicate
 *
 * @kctx: kbase context
 *
 * Determine whether an EXEC_VA zone has been created for the GPU address space
 * of the given kbase context.
 *
 * Return: True if the kbase context has an EXEC_VA zone.
 */
bool kbase_has_exec_va_zone(struct kbase_context *kctx);

/**
 * kbase_map_external_resource - Map an external resource to the GPU.
 * @kctx:              kbase context.
 * @reg:               External resource to map.
 * @locked_mm:         The mm_struct which has been locked for this operation.
 *
 * On successful mapping, the VA region and the gpu_alloc refcounts will be
 * increased, making it safe to use and store both values directly.
 *
 * For imported user buffers, this function will acquire the necessary
 * resources if they have not already been acquired, in order to
 * create a valid GPU mapping.
 *
 * Return: Zero on success, or negative error code.
 */
int kbase_map_external_resource(struct kbase_context *kctx, struct kbase_va_region *reg,
				struct mm_struct *locked_mm);

/**
 * kbase_unmap_external_resource - Unmap an external resource from the GPU.
 * @kctx:  kbase context.
 * @reg:   VA region corresponding to external resource
 *
 * On successful unmapping, the VA region and the gpu_alloc refcounts will
 * be decreased. If the refcount reaches zero, both @reg and the corresponding
 * allocation may be freed, so using them after returning from this function
 * requires the caller to explicitly check their state.
 *
 * For imported user buffers, in the case where the refcount reaches zero,
 * the function shall release all the resources acquired by the user buffer,
 * including DMA mappings and physical pages.
 */
void kbase_unmap_external_resource(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_unpin_user_buf_page - Unpin a page of a user buffer.
 * @page: page to unpin
 *
 * The caller must have ensured that there are no CPU mappings for @page (as
 * might be created from the struct kbase_mem_phy_alloc that tracks @page), and
 * that userspace will not be able to recreate the CPU mappings.
 */
void kbase_unpin_user_buf_page(struct page *page);

/**
 * kbase_user_buf_pin_pages - Pin the pages of a user buffer.
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * To successfully pin the pages for a user buffer the current mm_struct must
 * be the same as the mm_struct of the user buffer. Further calls to this
 * function fail if pages have already been pinned successfully.
 *
 * Return: zero on success or negative number on failure.
 */
int kbase_user_buf_pin_pages(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_user_buf_unpin_pages - Release the pinned pages of a user buffer.
 * @alloc: The allocation for the imported user buffer.
 *
 * The caller must have ensured that previous stages of the termination of
 * the physical allocation have already been completed, which implies that
 * GPU mappings have been destroyed and DMA addresses have been unmapped.
 *
 * This function does not affect CPU mappings: if there are any, they should
 * be unmapped by the caller prior to calling this function.
 */
void kbase_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc);

/**
 * kbase_user_buf_dma_map_pages - DMA map pages of a user buffer.
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * Acquire DMA addresses for the pages of the user buffer. Automatic CPU cache
 * synchronization will be disabled because, in the general case, DMA mappings
 * might be larger than the region to import. Further calls to this function
 * fail if DMA addresses have already been obtained successfully.
 *
 * The caller must have ensured that physical pages have already been pinned
 * prior to calling this function.
 *
 * Return: zero on success or negative number on failure.
 */
int kbase_user_buf_dma_map_pages(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_user_buf_dma_unmap_pages - DMA unmap pages of a user buffer.
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * The caller must have ensured that GPU mappings have been destroyed prior to
 * calling this function.
 */
void kbase_user_buf_dma_unmap_pages(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_user_buf_empty_init - Initialize a user buffer as "empty".
 * @reg: The region associated with the imported user buffer.
 *
 * This function initializes a user buffer as "empty".
 */
void kbase_user_buf_empty_init(struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_empty_to_pinned - Transition user buffer from "empty" to "pinned".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "empty" state, in which no resources are
 * attached to it, to the "pinned" state, in which physical pages have been acquired and pinned.
 *
 * Return: zero on success or negative number on failure.
 */
int kbase_user_buf_from_empty_to_pinned(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_empty_to_dma_mapped - Transition user buffer from "empty" to "DMA mapped".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "empty" state, in which no resources are
 * attached to it, to the "DMA mapped" state, in which physical pages have been acquired, pinned
 * and DMA mappings for cache synchronization have been obtained.
 *
 * Notice that the "empty" state is preserved in case of failure.
 *
 * Return: zero on success or negative number on failure.
 */
int kbase_user_buf_from_empty_to_dma_mapped(struct kbase_context *kctx,
					    struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_empty_to_gpu_mapped - Transition user buffer from "empty" to "GPU mapped".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "empty" state, in which no resources are
 * attached to it, to the "GPU mapped" state, in which DMA mappings for cache synchronization
 * have been obtained and GPU mappings have been created.
 *
 * However, the function does not update the counter of GPU mappings in use, because different
 * policies may be applied at different points of the driver.
 *
 * Notice that the "empty" state is preserved in case of failure.
 *
 * Return: zero on success or negative number on failure.
 */
int kbase_user_buf_from_empty_to_gpu_mapped(struct kbase_context *kctx,
					    struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_pinned_to_empty - Transition user buffer from "pinned" to "empty".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "pinned" state, in which physical pages
 * have been acquired and pinned but no mappings are present, to the "empty" state, in which
 * physical pages have been unpinned.
 */
void kbase_user_buf_from_pinned_to_empty(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_pinned_to_gpu_mapped - Transition user buffer from "pinned" to "GPU mapped".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "pinned" state, in which physical pages
 * have been acquired and pinned but no mappings are present, to the "GPU mapped" state, in which
 * DMA mappings for cache synchronization have been obtained and GPU mappings have been created.
 *
 * However, the function does not update the counter of GPU mappings in use, because different
 * policies may be applied at different points of the driver.
 *
 * Notice that the "pinned" state is preserved in case of failure.
 *
 * Return: zero on success or negative number on failure.
 */
int kbase_user_buf_from_pinned_to_gpu_mapped(struct kbase_context *kctx,
					     struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_dma_mapped_to_pinned - Transition user buffer from "DMA mapped" to "pinned".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "DMA mapped" state, in which physical pages
 * have been acquired and pinned and DMA mappings have been obtained, to the "pinned" state,
 * in which DMA mappings have been released but physical pages are still pinned.
 */
void kbase_user_buf_from_dma_mapped_to_pinned(struct kbase_context *kctx,
					      struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_dma_mapped_to_empty - Transition user buffer from "DMA mapped" to "empty".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "DMA mapped" state, in which physical pages
 * have been acquired and pinned and DMA mappings have been obtained, to the "empty" state,
 * in which DMA mappings have been released and physical pages have been unpinned.
 */
void kbase_user_buf_from_dma_mapped_to_empty(struct kbase_context *kctx,
					     struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_dma_mapped_to_gpu_mapped - Transition user buffer from "DMA mapped" to "GPU mapped".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "DMA mapped" state, in which physical pages
 * have been acquired and pinned and DMA mappings have been obtained, to the "GPU mapped" state,
 * in which GPU mappings have been created.
 *
 * However, the function does not update the counter of GPU mappings in use, because different
 * policies may be applied at different points of the driver.
 *
 * Notice that the "DMA mapped" state is preserved in case of failure.
 *
 * Return: zero on success or negative number on failure.
 */
int kbase_user_buf_from_dma_mapped_to_gpu_mapped(struct kbase_context *kctx,
						 struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_gpu_mapped_to_pinned - Transition user buffer from "GPU mapped" to "pinned".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "GPU mapped" state, in which physical pages
 * have been acquired and pinned, DMA mappings have been obtained, and GPU mappings have been
 * created, to the "pinned" state, in which all mappings have been torn down but physical pages
 * are still pinned.
 *
 * However, the function does not update the counter of GPU mappings in use, because different
 * policies may be applied at different points of the driver.
 */
void kbase_user_buf_from_gpu_mapped_to_pinned(struct kbase_context *kctx,
					      struct kbase_va_region *reg);

/**
 * kbase_user_buf_from_gpu_mapped_to_empty - Transition user buffer from "GPU mapped" to "empty".
 * @kctx: kbase context.
 * @reg:  The region associated with the imported user buffer.
 *
 * This function transitions a user buffer from the "GPU mapped" state, in which physical pages
 * have been acquired and pinned, DMA mappings have been obtained, and GPU mappings have been
 * created, to the "empty" state, in which all mappings have been torn down and physical pages
 * have been unpinned.
 *
 * However, the function does not update the counter of GPU mappings in use, because different
 * policies may be applied at different points of the driver.
 */
void kbase_user_buf_from_gpu_mapped_to_empty(struct kbase_context *kctx,
					     struct kbase_va_region *reg);
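
/*
 * Illustrative summary (not part of the original documentation) of the user
 * buffer states used by the kbase_user_buf_from_*_to_*() helpers above, in
 * order of increasing resources held:
 *
 *   "empty"      : no resources attached
 *   "pinned"     : physical pages acquired and pinned
 *   "DMA mapped" : pinned, plus DMA mappings for cache synchronization
 *   "GPU mapped" : DMA mapped, plus GPU mappings created
 *
 * A minimal usage sketch, assuming the counter of GPU mappings in use is
 * managed elsewhere as noted above:
 *
 *   err = kbase_user_buf_from_empty_to_gpu_mapped(kctx, reg);
 *   if (err)
 *           return err;
 *   ... the GPU uses the imported buffer ...
 *   kbase_user_buf_from_gpu_mapped_to_empty(kctx, reg);
 */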

/**
 * kbase_sticky_resource_init - Initialize sticky resource management.
 * @kctx: kbase context
 *
 * Return: zero on success or negative error number on failure.
 */
int kbase_sticky_resource_init(struct kbase_context *kctx);

/**
 * kbase_sticky_resource_acquire - Acquire a reference on a sticky resource.
 * @kctx:     kbase context.
 * @gpu_addr: The GPU address of the external resource.
 *
 * Return: The metadata object which represents the binding between the
 * external resource and the kbase context on success or NULL on failure.
 */
struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(struct kbase_context *kctx,
							     u64 gpu_addr);

/**
 * kbase_sticky_resource_release - Release a reference on a sticky resource.
 * @kctx:     kbase context.
 * @meta:     Binding metadata.
 * @gpu_addr: GPU address of the external resource.
 *
 * If meta is NULL then gpu_addr will be used to scan the metadata list and
 * find the matching metadata (if any), otherwise the provided meta will be
 * used and gpu_addr will be ignored.
 *
 * Return: True if the release found the metadata and the reference was dropped.
 */
bool kbase_sticky_resource_release(struct kbase_context *kctx, struct kbase_ctx_ext_res_meta *meta,
				   u64 gpu_addr);

/**
 * kbase_sticky_resource_release_force - Release a sticky resource.
 * @kctx:     kbase context.
 * @meta:     Binding metadata.
 * @gpu_addr: GPU address of the external resource.
 *
 * If meta is NULL then gpu_addr will be used to scan the metadata list and
 * find the matching metadata (if any), otherwise the provided meta will be
 * used and gpu_addr will be ignored.
 *
 * Return: True if the release found the metadata and the resource was
 * released.
 */
bool kbase_sticky_resource_release_force(struct kbase_context *kctx,
					 struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr);

/**
 * kbase_sticky_resource_term - Terminate sticky resource management.
 * @kctx: kbase context
 */
void kbase_sticky_resource_term(struct kbase_context *kctx);

/**
 * kbase_mem_pool_lock - Lock a memory pool
 * @pool: Memory pool to lock
 */
static inline void kbase_mem_pool_lock(struct kbase_mem_pool *pool)
{
	spin_lock(&pool->pool_lock);
}

/**
 * kbase_mem_pool_unlock - Unlock a memory pool
 * @pool: Memory pool to unlock
 */
static inline void kbase_mem_pool_unlock(struct kbase_mem_pool *pool)
{
	spin_unlock(&pool->pool_lock);
}

/**
 * kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
 * @alloc: The physical allocation
 */
void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc);

#if MALI_USE_CSF
/**
 * kbase_link_event_mem_page - Add the new event memory region to the per
 *                             context list of event pages.
 * @kctx: Pointer to kbase context
 * @reg: Pointer to the region allocated for event memory.
 *
 * The region being linked shouldn't have been marked as free and should
 * have the KBASE_REG_CSF_EVENT flag set for it.
 */
static inline void kbase_link_event_mem_page(struct kbase_context *kctx,
					     struct kbase_va_region *reg)
{
	lockdep_assert_held(&kctx->reg_lock);

	WARN_ON(reg->flags & KBASE_REG_FREE);
	WARN_ON(!(reg->flags & KBASE_REG_CSF_EVENT));

	list_add(&reg->link, &kctx->csf.event_pages_head);
}

/**
 * kbase_unlink_event_mem_page - Remove the event memory region from the per
 *                               context list of event pages.
 * @kctx: Pointer to kbase context
 * @reg: Pointer to the region allocated for event memory.
 *
 * The region being un-linked shouldn't have been marked as free and should
 * have the KBASE_REG_CSF_EVENT flag set for it.
 */
static inline void kbase_unlink_event_mem_page(struct kbase_context *kctx,
					       struct kbase_va_region *reg)
{
	lockdep_assert_held(&kctx->reg_lock);

	WARN_ON(reg->flags & KBASE_REG_FREE);
	WARN_ON(!(reg->flags & KBASE_REG_CSF_EVENT));

	list_del(&reg->link);
}

/**
 * kbase_mcu_shared_interface_region_tracker_init - Initialize the rb tree to
 *         manage the shared interface segment of MCU firmware address space.
 * @kbdev: Pointer to the kbase device
 *
 * Return: zero on success or negative error number on failure.
 */
int kbase_mcu_shared_interface_region_tracker_init(struct kbase_device *kbdev);

/**
 * kbase_mcu_shared_interface_region_tracker_term - Teardown the rb tree
 *         managing the shared interface segment of MCU firmware address space.
 * @kbdev: Pointer to the kbase device
 */
void kbase_mcu_shared_interface_region_tracker_term(struct kbase_device *kbdev);
#endif

/**
 * kbase_mem_umm_map - Map dma-buf
 * @kctx: Pointer to the kbase context
 * @reg: Pointer to the region of the imported dma-buf to map
 *
 * Map a dma-buf on the GPU. The mappings are reference counted.
 *
 * Return: 0 on success, or a negative error code.
 */
int kbase_mem_umm_map(struct kbase_context *kctx, struct kbase_va_region *reg);

/**
 * kbase_mem_umm_unmap - Unmap dma-buf
 * @kctx: Pointer to the kbase context
 * @reg: Pointer to the region of the imported dma-buf to unmap
 * @alloc: Pointer to the alloc to release
 *
 * Unmap a dma-buf from the GPU. The mappings are reference counted.
 *
 * @reg must be the original region with GPU mapping of @alloc; or NULL. If
 * @reg is NULL, or doesn't match @alloc, the GPU page table entries matching
 * @reg will not be updated.
 *
 * @alloc must be a valid physical allocation of type
 * KBASE_MEM_TYPE_IMPORTED_UMM that was previously mapped by
 * kbase_mem_umm_map(). The dma-buf attachment referenced by @alloc will
 * release it's mapping reference, and if the refcount reaches 0, also be
 * unmapped, regardless of the value of @reg.
 */
void kbase_mem_umm_unmap(struct kbase_context *kctx, struct kbase_va_region *reg,
			 struct kbase_mem_phy_alloc *alloc);

/**
 * kbase_mem_do_sync_imported - Sync caches for imported memory
 * @kctx: Pointer to the kbase context
 * @reg: Pointer to the region with imported memory to sync
 * @sync_fn: The type of sync operation to perform
 *
 * Sync CPU caches for supported (currently only dma-buf (UMM)) memory.
 * Attempting to sync unsupported imported memory types will result in an error
 * code, -EINVAL.
 *
 * Return: 0 on success, or a negative error code.
 */
int kbase_mem_do_sync_imported(struct kbase_context *kctx, struct kbase_va_region *reg,
			       enum kbase_sync_type sync_fn);

/**
 * kbase_mem_copy_to_pinned_user_pages - Memcpy from source input page to
 * an unaligned address at a given offset from the start of a target page.
 *
 * @dest_pages:		Pointer to the array of pages to which the content is
 *			to be copied from the provided @src_page.
 * @src_page:		Pointer to the source page from which the content
 *			is to be copied.
 * @to_copy:		Total number of bytes pending to be copied from
 *			@src_page to @target_page_nr within @dest_pages.
 *			This will get decremented by the number of bytes
 *			copied from the source page to the target pages.
 * @nr_pages:		Total number of pages present in @dest_pages.
 * @target_page_nr:	Target page number to which @src_page needs to be
 *			copied. This will get incremented by one if
 *			we are successful in copying from source page.
 * @offset:		Offset in bytes into the target pages from which the
 *			copying is to be performed.
 *
 * Return: 0 on success, or a negative error code.
 */
int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages, void *src_page, size_t *to_copy,
					unsigned int nr_pages, unsigned int *target_page_nr,
					size_t offset);
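
/*
 * Illustrative sketch (not part of the original documentation) of a single
 * call copying the contents of one source page into the pinned destination
 * pages; the kmap()/kunmap() of the source page and all names other than the
 * function above are hypothetical. On success, to_copy has been decremented
 * by the number of bytes copied and target_page_nr may have been advanced.
 *
 *   void *src = kmap(src_page);
 *
 *   err = kbase_mem_copy_to_pinned_user_pages(dest_pages, src, &to_copy,
 *                                             nr_dest_pages, &target_page_nr,
 *                                             offset);
 *   kunmap(src_page);
 *   if (err)
 *           return err;
 */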

/**
 * kbase_mem_allow_alloc - Check if allocation of GPU memory is allowed
 * @kctx: Pointer to kbase context
 *
 * Don't allow the allocation of GPU memory if the ioctl has been issued
 * from a forked child process using the mali device file fd inherited from
 * the parent process.
 *
 * Return: true if allocation is allowed.
 */
static inline bool kbase_mem_allow_alloc(struct kbase_context *kctx)
{
	return (kctx->process_mm == current->mm);
}

/**
 * kbase_mem_mmgrab - Wrapper function to take reference on mm_struct of current process
 */
static inline void kbase_mem_mmgrab(void)
{
	/* This merely takes a reference on the memory descriptor structure
	 * i.e. mm_struct of current process and not on its address space and
	 * so won't block the freeing of address space on process exit.
	 */
#if KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE
	atomic_inc(&current->mm->mm_count);
#else
	mmgrab(current->mm);
#endif
}

/**
 * kbase_mem_group_id_get - Get group ID from flags
 * @flags: Flags to pass to base_mem_alloc
 *
 * This inline function extracts the encoded group ID from flags
 * and converts it into a numeric value (0-15).
 *
 * Return: group ID (0-15) extracted from the parameter
 */
static inline int kbase_mem_group_id_get(base_mem_alloc_flags flags)
{
	KBASE_DEBUG_ASSERT((flags & ~BASE_MEM_FLAGS_INPUT_MASK) == 0);
	return (int)BASE_MEM_GROUP_ID_GET(flags);
}

/**
 * kbase_mem_group_id_set - Set group ID into base_mem_alloc_flags
 * @id: group ID (0-15) to encode
 *
 * This inline function encodes a specific group ID into base_mem_alloc_flags.
 * Parameter 'id' should lie between 0 and 15.
 *
 * Return: base_mem_alloc_flags with the group ID (id) encoded
 *
 * The return value can be combined with other flags against base_mem_alloc
 * to identify a specific memory group.
 */
static inline base_mem_alloc_flags kbase_mem_group_id_set(int id)
{
	return BASE_MEM_GROUP_ID_SET(id);
}
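
/*
 * Illustrative sketch (not part of the original documentation): encoding a
 * memory group ID into allocation flags and extracting it again. The
 * protection flags combined with the group ID are examples only.
 *
 *   base_mem_alloc_flags flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD;
 *   int group_id;
 *
 *   flags |= kbase_mem_group_id_set(3);
 *   ...
 *   group_id = kbase_mem_group_id_get(flags & BASE_MEM_FLAGS_INPUT_MASK);
 */
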
#endif /* _KBASE_MEM_H_ */