@@ -73,12 +73,7 @@
  */
 #define ZS_ALIGN		8
 
-/*
- * A single 'zspage' is composed of up to 2^N discontiguous 0-order (single)
- * pages. ZS_MAX_ZSPAGE_ORDER defines upper limit on N.
- */
-#define ZS_MAX_ZSPAGE_ORDER	2
-#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(1, UL) << ZS_MAX_ZSPAGE_ORDER)
+#define ZS_MAX_PAGES_PER_ZSPAGE	(_AC(1, UL) << ZS_MAX_PAGE_ORDER)
 
 #define ZS_HANDLE_SIZE (sizeof(unsigned long))
 
@@ -123,10 +118,8 @@
 #define ISOLATED_BITS	3
 #define MAGIC_VAL_BITS	8
 
-#define MAX(a, b) ((a) >= (b) ? (a) : (b))
-/* ZS_MIN_ALLOC_SIZE must be multiple of ZS_ALIGN */
-#define ZS_MIN_ALLOC_SIZE \
-	MAX(32, (ZS_MAX_PAGES_PER_ZSPAGE << PAGE_SHIFT >> OBJ_INDEX_BITS))
+#define ZS_MIN_ALLOC_SIZE	32U
+
 /* each chunk includes extra space to keep handle */
 #define ZS_MAX_ALLOC_SIZE	PAGE_SIZE
 
@@ -140,12 +133,10 @@
  * determined). NOTE: all those class sizes must be set as multiple of
  * ZS_ALIGN to make sure link_free itself never has to span 2 pages.
  *
- * ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
- * (reason above)
+ * pool->min_alloc_size (ZS_MIN_ALLOC_SIZE) and ZS_SIZE_CLASS_DELTA must
+ * be multiple of ZS_ALIGN (reason above)
  */
 #define ZS_SIZE_CLASS_DELTA	(PAGE_SIZE >> CLASS_BITS)
-#define ZS_SIZE_CLASSES	(DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE, \
-				      ZS_SIZE_CLASS_DELTA) + 1)
 
 enum fullness_group {
 	ZS_EMPTY,
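The class geometry implied by these macros is easy to check outside the kernel. A minimal sketch, assuming 4 KiB pages and CLASS_BITS = 8 (both config-dependent, not stated in this hunk): ZS_SIZE_CLASS_DELTA then works out to 16 bytes, so class sizes step 32, 48, 64, ... up to PAGE_SIZE, and every step stays a multiple of ZS_ALIGN = 8 as the comment above requires.

```c
/* Sanity-check the class-size stepping; PAGE_SIZE and CLASS_BITS assumed. */
#include <assert.h>

int main(void)
{
	const unsigned int page_size = 4096;          /* assumed 4 KiB pages */
	const unsigned int delta = page_size >> 8;    /* ZS_SIZE_CLASS_DELTA = 16 */
	const unsigned int min_alloc = 32, align = 8; /* ZS_MIN_ALLOC_SIZE, ZS_ALIGN */

	for (unsigned int size = min_alloc; size <= page_size; size += delta)
		assert(size % align == 0);  /* so link_free never spans 2 pages */
	return 0;
}
```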
@@ -234,12 +225,15 @@ struct zs_ops {
 struct zs_pool {
 	const char *name;
 
-	struct size_class *size_class[ZS_SIZE_CLASSES];
+	struct size_class **size_class;
 	struct kmem_cache *handle_cachep;
 	struct kmem_cache *zspage_cachep;
 
 	atomic_long_t pages_allocated;
 
+	u32 num_size_classes;
+	u32 min_alloc_size;
+
 	struct zs_pool_stats stats;
 
 	/* Compact classes */
@@ -595,15 +589,15 @@ static void set_zspage_mapping(struct zspage *zspage,
  * classes depending on its size. This function returns index of the
  * size class which has chunk size big enough to hold the given size.
  */
-static int get_size_class_index(int size)
+static int get_size_class_index(struct zs_pool *pool, int size)
 {
 	int idx = 0;
 
-	if (likely(size > ZS_MIN_ALLOC_SIZE))
-		idx = DIV_ROUND_UP(size - ZS_MIN_ALLOC_SIZE,
+	if (likely(size > pool->min_alloc_size))
+		idx = DIV_ROUND_UP(size - pool->min_alloc_size,
 				ZS_SIZE_CLASS_DELTA);
 
-	return min_t(int, ZS_SIZE_CLASSES - 1, idx);
+	return min_t(int, pool->num_size_classes - 1, idx);
 }
 
 /* type can be of enum type class_stat_type or fullness_group */
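The index math itself is unchanged; only its inputs move from macros to pool fields. A userspace model under the same assumed geometry (32-byte floor, 16-byte delta, 255 classes): a 100-byte request gives DIV_ROUND_UP(100 - 32, 16) = 5, i.e. the 32 + 5 * 16 = 112-byte class, the smallest that fits.

```c
/* Userspace model of get_size_class_index(); geometry values are assumed. */
#include <stdio.h>

static int size_class_index(unsigned int min_alloc, unsigned int delta,
			    unsigned int num_classes, unsigned int size)
{
	unsigned int idx = 0;

	if (size > min_alloc)
		idx = (size - min_alloc + delta - 1) / delta; /* DIV_ROUND_UP */
	return idx < num_classes - 1 ? idx : num_classes - 1;
}

int main(void)
{
	/* 100 bytes -> index 5 -> the 112-byte class (32 + 5 * 16) */
	printf("%d\n", size_class_index(32, 16, 255, 100));
	return 0;
}
```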
@@ -663,7 +657,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
 			"obj_allocated", "obj_used", "pages_used",
 			"pages_per_zspage", "freeable");
 
-	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+	for (i = 0; i < pool->num_size_classes; i++) {
 		class = pool->size_class[i];
 
 		if (class->index != i)
@@ -849,13 +843,13 @@ static enum fullness_group fix_fullness_group(struct size_class *class,
  * link together 3 PAGE_SIZE sized pages to form a zspage
  * since then we can perfectly fit in 8 such objects.
  */
-static int get_pages_per_zspage(int class_size)
+static int get_pages_per_zspage(u32 class_size, u32 num_pages)
 {
 	int i, max_usedpc = 0;
 	/* zspage order which gives maximum used size per KB */
 	int max_usedpc_order = 1;
 
-	for (i = 1; i <= ZS_MAX_PAGES_PER_ZSPAGE; i++) {
+	for (i = 1; i <= num_pages; i++) {
 		int zspage_size;
 		int waste, usedpc;
 
@@ -1315,7 +1309,7 @@ unsigned int zs_lookup_class_index(struct zs_pool *pool, unsigned int size)
 {
 	struct size_class *class;
 
-	class = pool->size_class[get_size_class_index(size)];
+	class = pool->size_class[get_size_class_index(pool, size)];
 
 	return class->index;
 }
@@ -1526,7 +1520,7 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
 
 	/* extra space in chunk to keep the handle */
 	size += ZS_HANDLE_SIZE;
-	class = pool->size_class[get_size_class_index(size)];
+	class = pool->size_class[get_size_class_index(pool, size)];
 
 	/* pool->lock effectively protects the zpage migration */
 	spin_lock(&pool->lock);
@@ -2102,7 +2096,7 @@ static void async_free_zspage(struct work_struct *work)
 	struct zs_pool *pool = container_of(work, struct zs_pool,
 					free_work);
 
-	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+	for (i = 0; i < pool->num_size_classes; i++) {
 		class = pool->size_class[i];
 		if (class->index != i)
 			continue;
@@ -2250,7 +2244,7 @@ unsigned long zs_compact(struct zs_pool *pool)
 	struct size_class *class;
 	unsigned long pages_freed = 0;
 
-	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+	for (i = pool->num_size_classes - 1; i >= 0; i--) {
 		class = pool->size_class[i];
 		if (class->index != i)
 			continue;
@@ -2294,7 +2288,7 @@ static unsigned long zs_shrinker_count(struct shrinker *shrinker,
 	struct zs_pool *pool = container_of(shrinker, struct zs_pool,
 			shrinker);
 
-	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+	for (i = pool->num_size_classes - 1; i >= 0; i--) {
 		class = pool->size_class[i];
 		if (class->index != i)
 			continue;
@@ -2336,11 +2330,27 @@ struct zs_pool *zs_create_pool(const char *name)
 	int i;
 	struct zs_pool *pool;
 	struct size_class *prev_class = NULL;
+	unsigned long num_pages;
 
 	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
 	if (!pool)
 		return NULL;
 
+	num_pages = 1UL << ZS_DEFAULT_PAGE_ORDER;
+	/* min_alloc_size must be multiple of ZS_ALIGN */
+	pool->min_alloc_size = num_pages << PAGE_SHIFT >> OBJ_INDEX_BITS;
+	pool->min_alloc_size = max(pool->min_alloc_size, ZS_MIN_ALLOC_SIZE);
+
+	pool->num_size_classes =
+		DIV_ROUND_UP(ZS_MAX_ALLOC_SIZE - pool->min_alloc_size,
+			     ZS_SIZE_CLASS_DELTA) + 1;
+
+	pool->size_class = kmalloc_array(pool->num_size_classes,
+					 sizeof(struct size_class *),
+					 GFP_KERNEL | __GFP_ZERO);
+	if (!pool->size_class)
+		goto err;
+
 	init_deferred_free(pool);
 	spin_lock_init(&pool->lock);
 
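The former compile-time ZS_MIN_ALLOC_SIZE / ZS_SIZE_CLASSES arithmetic now runs here, per pool. Plugging in one assumed configuration (4 KiB pages, OBJ_INDEX_BITS = 11, CLASS_BITS = 8, ZS_DEFAULT_PAGE_ORDER = 2; all of these are config-dependent): 4 pages << 12 >> 11 gives 8, the 32-byte floor wins, and DIV_ROUND_UP(4096 - 32, 16) + 1 = 255 size classes. A sketch of that computation:

```c
/* Model of the per-pool sizing done above; all constants are assumptions. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	const unsigned int page_shift = 12, obj_index_bits = 11;
	const unsigned int min_floor = 32, max_alloc = 1u << page_shift;
	const unsigned int delta = max_alloc >> 8;	/* CLASS_BITS = 8 assumed */
	unsigned long num_pages = 1UL << 2;		/* ZS_DEFAULT_PAGE_ORDER = 2 */
	unsigned int min_alloc = num_pages << page_shift >> obj_index_bits;

	if (min_alloc < min_floor)
		min_alloc = min_floor;

	printf("min_alloc_size=%u classes=%u\n", min_alloc,
	       DIV_ROUND_UP(max_alloc - min_alloc, delta) + 1);	/* 32, 255 */
	return 0;
}
```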
@@ -2355,17 +2365,17 @@ struct zs_pool *zs_create_pool(const char *name)
 	 * Iterate reversely, because, size of size_class that we want to use
 	 * for merging should be larger or equal to current size.
 	 */
-	for (i = ZS_SIZE_CLASSES - 1; i >= 0; i--) {
+	for (i = pool->num_size_classes - 1; i >= 0; i--) {
 		int size;
 		int pages_per_zspage;
 		int objs_per_zspage;
 		struct size_class *class;
 		int fullness = 0;
 
-		size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+		size = pool->min_alloc_size + i * ZS_SIZE_CLASS_DELTA;
 		if (size > ZS_MAX_ALLOC_SIZE)
 			size = ZS_MAX_ALLOC_SIZE;
-		pages_per_zspage = get_pages_per_zspage(size);
+		pages_per_zspage = get_pages_per_zspage(size, num_pages);
 		objs_per_zspage = pages_per_zspage * PAGE_SIZE / size;
 
 		/*
@@ -2450,7 +2460,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 	zs_flush_migration(pool);
 	zs_pool_stat_destroy(pool);
 
-	for (i = 0; i < ZS_SIZE_CLASSES; i++) {
+	for (i = 0; i < pool->num_size_classes; i++) {
 		int fg;
 		struct size_class *class = pool->size_class[i];
 
@@ -2470,6 +2480,7 @@ void zs_destroy_pool(struct zs_pool *pool)
 	}
 
 	destroy_cache(pool);
+	kfree(pool->size_class);
 	kfree(pool->name);
 	kfree(pool);
 }
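For completeness, a sketch of how a client drives these paths end to end. The call names match this file's public API as shown in the hunks above, but the scaffolding and error handling are illustrative only; in particular, how zs_malloc() reports failure differs across kernel versions, so the 0-on-failure check below is an assumption.

```c
/* Illustrative zsmalloc usage; error-handling convention is assumed. */
#include <linux/zsmalloc.h>

static int example(void)
{
	struct zs_pool *pool;
	unsigned long handle;

	/* size classes are now sized at runtime inside zs_create_pool() */
	pool = zs_create_pool("example");
	if (!pool)
		return -ENOMEM;

	/* 100 bytes lands in the smallest class that fits (handle included) */
	handle = zs_malloc(pool, 100, GFP_KERNEL);
	if (!handle) {			/* 0-on-failure convention assumed */
		zs_destroy_pool(pool);
		return -ENOMEM;
	}

	zs_free(pool, handle);
	zs_destroy_pool(pool);	/* also frees the dynamic size_class array */
	return 0;
}
```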