My understanding is limited, so please point out anything described inaccurately. When reposting, please credit the source: http://blog.csdn.net/vanbreaker/article/details/7671618

The slab allocator reclaims (frees) objects according to the following principles:

1. If the local (per-CPU) array cache still has room for free objects, the object is simply put back into the local cache.

2. If the local cache is full, batchcount objects are moved from the local cache back to the slabs. The transfer is FIFO: the first batchcount free objects in the entry array are the ones moved, because they have been sitting in the array the longest and are therefore the least likely to still be resident in the CPU's hardware cache (the array_cache fields involved are sketched right below).
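For reference, the per-CPU array cache that these principles refer to looks roughly like the following in kernels of this generation (a sketch; the exact field set can differ between versions):

struct array_cache {
	unsigned int avail;      /* number of free object pointers currently cached */
	unsigned int limit;      /* capacity of entry[]; a free that hits this triggers a flush */
	unsigned int batchcount; /* how many objects to move to/from the slabs at a time */
	unsigned int touched;    /* set when the cache was recently used (checked by the reaper) */
	spinlock_t lock;
	void *entry[];           /* the cached free object pointers */
};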

 

Objects are released through kmem_cache_free(); the overall flow is kmem_cache_free() → __cache_free() → (when the local cache overflows) cache_flusharray() → free_block() → slab_put_obj().
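kmem_cache_free() itself is only a thin wrapper in this kernel generation: it disables local interrupts and hands the object to __cache_free(). A rough sketch with the debug and tracing hooks trimmed (details vary by version):

void kmem_cache_free(struct kmem_cache *cachep, void *objp)
{
	unsigned long flags;

	local_irq_save(flags);		/* __cache_free() must run with IRQs off */
	__cache_free(cachep, objp);
	local_irq_restore(flags);
}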

We take __cache_free() as the entry point for the analysis.

/*
 * Release an obj back to its cache. If the obj has a constructed state, it must
 * be in this state _before_ it is released.  Called with disabled ints.
 */
static inline void __cache_free(struct kmem_cache *cachep, void *objp)
{
	struct array_cache *ac = cpu_cache_get(cachep);

	check_irq_off();
	kmemleak_free_recursive(objp, cachep->flags);
	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));

	kmemcheck_slab_free(cachep, objp, obj_size(cachep));

	/*
	 * Skip calling cache_free_alien() when the platform is not numa.
	 * This will avoid cache misses that happen while accessing slabp (which
	 * is per page memory  reference) to get nodeid. Instead use a global
	 * variable to skip the call, which is mostly likely to be present in
	 * the cache.
	 */
	if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
		return;

	/* If the local cache still has room (avail < limit), just record the
	 * object's address in entry[] */
	if (likely(ac->avail < ac->limit)) {
		STATS_INC_FREEHIT(cachep);
		ac->entry[ac->avail++] = objp;
		return;
	} else {/* otherwise flush a batch of free objects back to the slabs first */
		STATS_INC_FREEMISS(cachep);
		cache_flusharray(cachep, ac);
		/* cache_flusharray() lowered avail by batchcount, so there is room now */
		ac->entry[ac->avail++] = objp;
	}
}
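cpu_cache_get() at the top of the function simply picks the current CPU's array_cache out of the cache descriptor; in this kernel generation it is roughly (sketch):

static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
{
	/* one array_cache pointer per CPU is kept in the kmem_cache */
	return cachep->array[smp_processor_id()];
}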


 

When the local cache is full, __cache_free() calls cache_flusharray(), which first tries to park the objects in the node's shared array cache and otherwise hands them to free_block():

static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
{
	int batchcount;
	struct kmem_list3 *l3;
	int node = numa_node_id();

	batchcount = ac->batchcount;
#if DEBUG
	BUG_ON(!batchcount || batchcount > ac->avail);
#endif
	check_irq_off();
	l3 = cachep->nodelists[node];
	spin_lock(&l3->list_lock);
	if (l3->shared) {/* a shared array cache is enabled for this node */
		/* get the shared array_cache */
		struct array_cache *shared_array = l3->shared;
		/* how many more free objects the shared cache can still hold */
		int max = shared_array->limit - shared_array->avail;
		if (max) {
			if (batchcount > max)
				batchcount = max;
			/* copy batchcount object pointers into the shared array cache */
			memcpy(&(shared_array->entry[shared_array->avail]),
			       ac->entry, sizeof(void *) * batchcount);
			shared_array->avail += batchcount;
			goto free_done;
		}
	}

	/* return the first batchcount objects of the local cache to the slabs */
	free_block(cachep, ac->entry, batchcount, node);
free_done:
#if STATS
	{
		int i = 0;
		struct list_head *p;

		p = l3->slabs_free.next;
		while (p != &(l3->slabs_free)) {
			struct slab *slabp;

			slabp = list_entry(p, struct slab, list);
			BUG_ON(slabp->inuse);

			i++;
			p = p->next;
		}
		STATS_SET_FREEABLE(cachep, i);
	}
#endif
	spin_unlock(&l3->list_lock);
	ac->avail -= batchcount;/* update the local cache's avail count */
	/* slide the remaining entries (starting at index batchcount) to the front */
	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
}
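The final memmove() is what implements the FIFO principle stated at the beginning: the oldest batchcount pointers leave the array and the newer ones slide down to index 0. A tiny userspace illustration of just that compaction step (illustration only, not kernel code):

#include <stdio.h>
#include <string.h>

int main(void)
{
	long objs[6] = {0, 1, 2, 3, 4, 5};
	void *entry[6];
	int avail = 6, batchcount = 4;

	for (int i = 0; i < avail; i++)
		entry[i] = &objs[i];		/* entry[0] holds the oldest cached object */

	/* entry[0..batchcount-1] would now go to the shared cache or free_block() */
	avail -= batchcount;
	memmove(entry, &entry[batchcount], sizeof(void *) * avail);

	for (int i = 0; i < avail; i++)		/* prints 4 and 5: the newest survivors */
		printf("%ld\n", *(long *)entry[i]);
	return 0;
}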


cache_flusharray() returns the objects to the slab layer through free_block(), which processes them one at a time:

static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
		       int node)
{
	int i;
	struct kmem_list3 *l3;

	for (i = 0; i < nr_objects; i++) {
		void *objp = objpp[i];
		struct slab *slabp;

		/* find the slab descriptor from the object's virtual address */
		slabp = virt_to_slab(objp);

		/* get this node's kmem_list3 */
		l3 = cachep->nodelists[node];

		/* first remove the slab from whichever list it is currently on */
		list_del(&slabp->list);
		check_spinlock_acquired_node(cachep, node);
		check_slabp(cachep, slabp);
		
		/* put the object back onto the slab */
		slab_put_obj(cachep, slabp, objp, node);
		STATS_DEC_ACTIVE(cachep);

		/* one more free object on this node's kmem_list3 */
		l3->free_objects++;
		check_slabp(cachep, slabp);

		/* fixup slab chains */
		/* all of the slab's objects are now free */
		if (slabp->inuse == 0) {
			/* the node's free-object count has exceeded its limit */
			if (l3->free_objects > l3->free_limit) {
				/* give back a whole slab's worth of objects */
				l3->free_objects -= cachep->num;
				/* No need to drop any previously held
				 * lock here, even if we have a off-slab slab
				 * descriptor it is guaranteed to come from
				 * a different cache, refer to comments before
				 * alloc_slabmgmt.
				 */
				/* destroy the slab and give its pages back */
				slab_destroy(cachep, slabp);
			} else {
				/* add the slab to the node's slabs_free list */
				list_add(&slabp->list, &l3->slabs_free);
			}
		} else {/* otherwise the slab still has objects in use: add it to the partial list */
			/* Unconditionally move a slab to the end of the
			 * partial list on free - maximum time for the
			 * other objects to be freed, too.
			 */
			list_add_tail(&slabp->list, &l3->slabs_partial);
		}
	}
}
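free_block() locates the slab descriptor purely from the object's virtual address. In this kernel generation virt_to_slab() is roughly the following sketch: the address is converted to its struct page, and the slab pointer that was stashed in the page when the slab was set up is read back (helper names may differ slightly between versions):

static inline struct slab *virt_to_slab(const void *obj)
{
	struct page *page = virt_to_head_page(obj);

	/* returns the struct slab pointer stored in the page at slab-creation time */
	return page_get_slab(page);
}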


 

Finally, slab_put_obj() pushes the object back onto the slab's internal free list:

static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
				void *objp, int nodeid)
{
	/* compute the object's index within the slab from its address */
	unsigned int objnr = obj_to_index(cachep, slabp, objp);

#if DEBUG
	/* Verify that the slab belongs to the intended node */
	WARN_ON(slabp->nodeid != nodeid);

	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
		printk(KERN_ERR "slab: double free detected in cache "
				"'%s', objp %p\n", cachep->name, objp);
		BUG();
	}
#endif
	/* the freed object's bufctl slot points at the old head of the free list */
	slab_bufctl(slabp)[objnr] = slabp->free;
	/* the freed object itself becomes the new head of the free list */
	slabp->free = objnr;
	/* one fewer object in use on this slab */
	slabp->inuse--;
}
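The free list lives entirely in the slab's bufctl array: slabp->free is the index of the first free object, and slab_bufctl(slabp)[i] is the index of the free object that follows i. The allocation-side counterpart, slab_get_obj(), pops from the head of the same list; it looks roughly like this in the same kernel generation (sketch):

static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
				int nodeid)
{
	/* hand out the object at the head of the free list */
	void *objp = index_to_obj(cachep, slabp, slabp->free);
	kmem_bufctl_t next;

	slabp->inuse++;
	/* the next free index becomes the new head of the list */
	next = slab_bufctl(slabp)[slabp->free];
#if DEBUG
	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
	WARN_ON(slabp->nodeid != nodeid);
#endif
	slabp->free = next;

	return objp;
}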


 
