Date:      Mon, 14 Nov 2005 12:37:45 GMT
From:      soc-bushman <soc-bushman@FreeBSD.org>
To:        Perforce Change Reviews <perforce@freebsd.org>
Subject:   PERFORCE change 86749 for review
Message-ID:  <200511141237.jAECbjAd049263@repoman.freebsd.org>

http://perforce.freebsd.org/chv.cgi?CH=86749

Change 86749 by soc-bushman@soc-bushman_stinger on 2005/11/14 12:37:33

	comments were added for almost all files

Affected files ...

.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/Makefile#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/Makefile#11 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agent.c#3 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agent.h#3 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/Makefile.inc#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/group.c#3 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/group.h#3 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/hosts.c#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/hosts.h#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/passwd.c#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/passwd.h#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/services.c#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/services.h#6 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.8#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.conf#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.conf.5#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cachelib.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cachelib.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cacheplcs.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cacheplcs.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/config.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/config.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/debug.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/debug.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/hashtable.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/log.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/log.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/mp_rs_query.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/mp_rs_query.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/mp_ws_query.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/mp_ws_query.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/parser.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/parser.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/protocol.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/protocol.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/query.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/query.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/singletons.c#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/singletons.h#10 edit
.. //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/startup/cached#5 edit

Differences ...

==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/Makefile#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/Makefile#11 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agent.c#3 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agent.h#3 (text+ko) ====

@@ -28,6 +28,13 @@
 #ifndef __CACHED_AGENT_H__
 #define __CACHED_AGENT_H__
 
+/*
+ * Agents are used to perform the actual lookups from the caching daemon.
+ * There are two types of agents: one for common requests and one for
+ * multipart requests.
+ * All agents are stored in the agents table, which is a singleton.
+ */
+
 enum agent_type {
     COMMON_AGENT = 0,
     MULTIPART_AGENT = 1

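As a rough illustration of the agents table mentioned above, a registry of
agents could be organized along the following lines. The structure layout and
field names here are hypothetical and are not taken from agent.h; only the
enum agent_type values come from the actual header.

	struct agent {
		char		*name;	/* e.g. "passwd", "group", "hosts" */
		enum agent_type	 type;	/* COMMON_AGENT or MULTIPART_AGENT */
	};

	struct agent_table {
		struct agent	**agents;	/* the singleton array */
		size_t		  agents_num;
	};
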
==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/Makefile.inc#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/group.c#3 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/group.h#3 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/hosts.c#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/hosts.h#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/passwd.c#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/passwd.h#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/services.c#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/agents/services.h#6 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.8#10 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.c#10 (text+ko) ====

@@ -91,6 +91,11 @@
 
 void get_time_func(struct timeval *);
 	
+/*
+ * We use our own routines to get the time (in seconds) that has elapsed
+ * since the daemon startup. This approach should probably be changed in
+ * the future.
+ */
 static void *
 get_time_thread(void *args)
 {
@@ -181,8 +186,8 @@
 	for (i = 0; i < size; ++i) {
 		config_entry = configuration_get_entry(config, i);
 	    	/* 
-	    	 * we should register common entries now - multipart entries
-	    	 * would be registered automatically during the queries
+	    	 * We should register common entries now - multipart entries
+	    	 * would be registered automatically during the queries.
 	    	 */
 		res = register_cache_entry(retval, (struct cache_entry_params *)
 			&config_entry->positive_cache_params);		
@@ -203,6 +208,10 @@
 	TRACE_OUT(destroy_cache_);
 }
 
+/*
+ * The socket and the kqueue are prepared here. We have one global queue for
+ * both socket and timer events.
+ */
 static struct runtime_env *
 init_runtime_env(struct configuration *config)
 {
@@ -360,6 +369,14 @@
 	}
 	qstate = (struct query_state *)event_data->udata;	
 	
+	/*
+	 * If the buffer that is to be sent/received is too large,
+	 * we transfer it implicitly, by using the query_io_buffer_read and
+	 * query_io_buffer_write functions of the query_state. These functions
+	 * use a temporary buffer, which is later sent/received in parts.
+	 * The code below implements buffer splitting/merging for send/receive
+	 * operations. It also does the actual socket IO operations.
+	 */
 	if (((qstate->use_alternate_io == 0) && 
 		(qstate->kevent_watermark <= event_data->data)) ||
 		((qstate->use_alternate_io != 0) && 
@@ -425,6 +442,7 @@
 		return;
 	}
 
+	/* updating the query_state lifetime variable */
 	get_time_func(&query_timeout);
 	query_timeout.tv_usec = 0;
 	query_timeout.tv_sec -= qstate->creation_time.tv_sec;
@@ -439,6 +457,15 @@
 		qstate->use_alternate_io = 0;					
 		
 	if (qstate->use_alternate_io == 0) {
+		/*
+		 * If we must send/receive a large block of data,
+		 * we should prepare the query_state's io_XXX fields.
+		 * We should also substitute its write_func and read_func
+		 * with query_io_buffer_write and query_io_buffer_read,
+		 * which will allow us to implicitly send/receive this large
+		 * buffer later (in subsequent calls to
+		 * process_socket_event).
+		 */
 		if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
 			if (qstate->io_buffer != NULL)
 				free(qstate->io_buffer);
@@ -492,6 +519,10 @@
 	TRACE_OUT(process_socket_event);
 }
 
+/*
+ * This routine is called if a timer event has been signaled in the kqueue. It
+ * just closes the socket and destroys the query_state.
+ */
 static void
 process_timer_event(struct kevent *event_data, struct runtime_env *env,
 	struct configuration *config)
@@ -507,6 +538,10 @@
 	TRACE_OUT(process_timer_event);	
 }
 
+/*
+ * The processing loop is the basic processing routine that forms the body of
+ * each processing thread.
+ */
 static void
 processing_loop(cache the_cache, struct runtime_env *env,
 	struct configuration *config)
@@ -524,10 +559,13 @@
 		nevents = kevent(env->queue, NULL, 0, eventlist, 
 	    		eventlist_size, NULL);
 		
+		/* 
+		 * We can only receive 1 event on success.
+		 */
 		if (nevents == 1) {
 			struct kevent *event_data;
 			event_data = &eventlist[0];
-			
+					
 			if (event_data->ident == env->sockfd)
 				accept_connection(event_data, env, config);
 			else {
@@ -553,6 +591,10 @@
 	TRACE_MSG("<= processing_loop");
 }
 
+/*
+ * Wrapper around the processing loop function. It sets the thread signal mask
+ * to avoid SIGPIPE signals (which can happen if the client works incorrectly).
+ */
 static void *
 processing_thread(void *data)
 {
@@ -584,6 +626,14 @@
 	pthread_rwlock_unlock(&s_time_lock);
 }
 
+/*
+ * The idea of _nss_cache_cycle_prevention_function is that nsdispatch will
+ * search for this symbol in the executable. This symbol marks the executable
+ * as the caching daemon itself. So, if it exists, nsdispatch won't try to
+ * connect to the caching daemon and will just ignore the 'cache' source in
+ * nsswitch.conf. This method helps to avoid cycles when the daemon performs
+ * requests on its own behalf.
+ */
 void
 _nss_cache_cycle_prevention_function(void)
 {

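To illustrate the cycle-prevention idea above, the client-side check can be
reduced to a symbol lookup. The sketch below is only an illustration and
assumes the symbol is searched for with dlsym(); the exact mechanism used by
nsdispatch may differ.

	#include <dlfcn.h>
	#include <stddef.h>

	static int
	inside_caching_daemon(void)
	{
		/*
		 * If the marker symbol is present in the running executable,
		 * we are the caching daemon itself and must ignore the
		 * 'cache' source in nsswitch.conf.
		 */
		return (dlsym(RTLD_DEFAULT,
		    "_nss_cache_cycle_prevention_function") != NULL);
	}
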
==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.conf#10 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cached.conf.5#10 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cachelib.c#10 (text+ko) ====

@@ -63,6 +63,9 @@
 static int ht_items_cmp_func(const void *, const void *);
 static hashtable_index_t ht_item_hash_func(const void *, size_t);	
 		
+/*
+ * Hashing and comparison routines that are used with the hash tables.
+ */
 static int
 ht_items_cmp_func(const void *p1, const void *p2)
 {	
@@ -102,9 +105,13 @@
 HASHTABLE_GENERATE(cache_ht_, cache_ht_item_, struct cache_ht_item_data_, data,
 	ht_item_hash_func, ht_items_cmp_func);
 
+/*
+ * Routines to sort and search the entries by name
+ */
 static int
 entries_bsearch_cmp_func(const void *key, const void *ent)
 {
+	
 	assert(key != NULL);
 	assert(ent != NULL);
 	
@@ -115,6 +122,7 @@
 static int
 entries_qsort_cmp_func(const void *e1, const void *e2)
 {
+	
 	assert(e1 != NULL);
 	assert(e2 != NULL);
 	
@@ -125,6 +133,7 @@
 static struct cache_entry_ **
 find_cache_entry_p(struct cache_ *the_cache, const char *entry_name)
 {
+	
 	return ((struct cache_entry_ **)(bsearch(entry_name, the_cache->entries, 
 		the_cache->entries_size, sizeof(struct cache_entry_ *), 
 		entries_bsearch_cmp_func)));	
@@ -133,6 +142,7 @@
 static void
 destroy_cache_mp_write_session(struct cache_mp_write_session_ *ws)
 {
+	
 	struct cache_mp_data_item_	*data_item;	
 		
 	TRACE_IN(destroy_cache_mp_write_session);
@@ -151,6 +161,7 @@
 static void
 destroy_cache_mp_read_session(struct cache_mp_read_session_ *rs)
 {
+	
 	TRACE_IN(destroy_cache_mp_read_session);
 	assert(rs != NULL);
 	free(rs);
@@ -184,6 +195,7 @@
 		
 		HASHTABLE_DESTROY(&(common_entry->items), data);
 		
+		/* FIFO policy is always first */
 		destroy_cache_fifo_policy(common_entry->policies[0]);		
 		switch (common_entry->common_params.policy) {
 		case CPT_LRU:
@@ -283,22 +295,35 @@
 	}
 }
 
+/*
+ * When passed to flush_cache_policy, ensures that all old elements are
+ * deleted.
+ */
 static int
 cache_lifetime_common_continue_func(struct cache_common_entry_ *entry, 
 	struct cache_policy_item_ *item)
 {
+	
 	return ((item->last_request_time.tv_sec - item->creation_time.tv_sec > 
-		entry->common_params.max_lifetime.tv_sec) ? 	1: 0);
+		entry->common_params.max_lifetime.tv_sec) ? 1: 0);
 }
 
+/*
+ * When passed to flush_cache_policy, ensures that all elements that
+ * exceed the size limit are deleted.
+ */
 static int
 cache_elemsize_common_continue_func(struct cache_common_entry_ *entry, 
 	struct cache_policy_item_ *item)
 {
+	
 	return ((entry->items_size > entry->common_params.satisf_elemsize) ? 1 
     		: 0);
 }
 
+/*
+ * Removes elements from the cache entry while the continue_func returns 1.
+ */
 static void
 flush_cache_policy(struct cache_common_entry_ *entry, 
 	struct cache_policy_ *policy,
@@ -402,7 +427,6 @@
 		}
 	}
 }
-/* static stuff implementation - end */
 
 struct cache_ *
 init_cache(struct cache_params const *params)
@@ -435,6 +459,7 @@
 void	
 destroy_cache(struct cache_ *the_cache)
 {
+	
 	TRACE_IN(destroy_cache);
 	assert(the_cache != NULL);
 	
@@ -628,6 +653,14 @@
 	}
 }
 
+/*
+ * Tries to read the element with the specified key from the cache. If the
+ * value_size is too small, it will be filled with the proper number, and
+ * the user will need to call cache_read again with a value buffer that
+ * is large enough.
+ * Function returns 0 on success, -1 on error, and -2 if the value_size is too
+ * small.
+ */
 int	
 cache_read(struct cache_entry_ *entry, const char *key, size_t key_size,
 	char *value, size_t *value_size)
@@ -725,6 +758,10 @@
 	return (0);
 }
 
+/*
+ * Writes the value with the specified key into the cache entry.
+ * Function returns 0 on success, and -1 on error.
+ */
 int	
 cache_write(struct cache_entry_ *entry, const char *key, size_t key_size,
     	char const *value, size_t value_size)
@@ -820,6 +857,14 @@
 	return (0);
 }
 
+/*
+ * Initializes the write session for the specified multipart entry. This
+ * session should then be filled with data and either committed or abandoned
+ * by using close_cache_mp_write_session or abandon_cache_mp_write_session,
+ * respectively.
+ * Returns NULL on errors (when there are too many open write sessions for
+ * the entry).
+ */
 struct cache_mp_write_session_ *
 open_cache_mp_write_session(struct cache_entry_ *entry)
 {
@@ -852,6 +897,10 @@
 	return (retval);	
 }
 
+/*
+ * Writes data to the specified session. Returns 0 on success and -1 on error
+ * (when the write session size limit is exceeded).
+ */
 int
 cache_mp_write(struct cache_mp_write_session_ *ws, char *data, 
 	size_t data_size)
@@ -886,9 +935,13 @@
 	return (0);
 }
 
+/*
+ * Abandons the write session and frees all the associated resources.
+ */
 void
 abandon_cache_mp_write_session(struct cache_mp_write_session_ *ws)
 {
+	
 	TRACE_IN(abandon_cache_mp_write_session);
 	assert(ws != NULL);
 	assert(ws->parent_entry != NULL);
@@ -901,9 +954,13 @@
 	TRACE_OUT(abandon_cache_mp_write_session);
 }
 
+/*
+ * Commits the session to the entry for which it was created.
+ */
 void
 close_cache_mp_write_session(struct cache_mp_write_session_ *ws)
 {
+	
 	TRACE_IN(close_cache_mp_write_session);
 	assert(ws != NULL);
 	assert(ws->parent_entry != NULL);
@@ -913,10 +970,18 @@
 	--ws->parent_entry->ws_size;
 	
 	if (ws->parent_entry->completed_write_session == NULL) {
+		/* 
+		 * If there is no completed session yet, this will be the one 
+		 */
 		ws->parent_entry->get_time_func(
 	    		&ws->parent_entry->creation_time);
 		ws->parent_entry->completed_write_session = ws;
 	} else {
+		/* 
+		 * If there is a completed session, then we'll save our session
+		 * as a pending session. If there is already a pending session,
+		 * it will be destroyed.
+		 */
 		if (ws->parent_entry->pending_write_session != NULL)
 			destroy_cache_mp_write_session(
 				ws->parent_entry->pending_write_session);
@@ -926,6 +991,10 @@
 	TRACE_OUT(close_cache_mp_write_session);
 }
 
+/*
+ * Opens a read session for the specified entry. Returns NULL on errors (when
+ * there are no data in the entry, or the data are obsolete).
+ */
 struct cache_mp_read_session_ *
 open_cache_mp_read_session(struct cache_entry_ *entry)
 {
@@ -970,9 +1039,16 @@
 	return (retval);
 }
 
+/*
+ * Reads data from the read session, step by step.
+ * Returns 0 on success, -1 on error (when there are no more data), and -2 if
+ * the data_size is too small. In the latter case, data_size will be filled
+ * with the proper value.
+ */
 int
 cache_mp_read(struct cache_mp_read_session_ *rs, char *data, size_t *data_size)
 {
+	
 	TRACE_IN(cache_mp_read);
 	assert(rs != NULL);
 	
@@ -1000,9 +1076,15 @@
 	return (0);
 }
 
+/*
+ * Closes the read session. If there are no more read sessions and there is
+ * a pending write session, it will be committed and the old
+ * completed_write_session will be destroyed.
+ */
 void
 close_cache_mp_read_session(struct cache_mp_read_session_ *rs)
 {
+	
 	TRACE_IN(close_cache_mp_read_session);
 	assert(rs != NULL);
 	assert(rs->parent_entry != NULL);
@@ -1023,6 +1105,9 @@
 	TRACE_OUT(close_cache_mp_read_session);
 }
 
+/*
+ * Transforms all cache entries by applying the specified transformation.
+ */
 int 
 transform_cache(struct cache_ *the_cache, 
 	enum cache_transformation_t transformation)
@@ -1049,6 +1134,7 @@
 transform_cache_entry(struct cache_entry_ *entry, 
 	enum cache_transformation_t transformation)
 {
+	
 	switch (transformation) {
 	case CTT_CLEAR:
 		clear_cache_entry(entry);

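The cache_read() contract described above (return -2 and update value_size
when the supplied buffer is too small) implies a retry pattern on the caller's
side. The helper below is a hypothetical sketch of such a caller and is not
part of cachelib; only the cache_read() prototype comes from the sources above.

	#include <stdlib.h>
	#include <string.h>
	#include "cachelib.h"

	/*
	 * Reads a value of unknown size by retrying with a larger buffer
	 * when cache_read() reports that the supplied one is too small.
	 */
	static char *
	cache_read_alloc(struct cache_entry_ *entry, const char *key,
		size_t key_size, size_t *value_size)
	{
		char	buf[128];
		char	*value;
		int	res;

		*value_size = sizeof(buf);
		res = cache_read(entry, key, key_size, buf, value_size);
		if (res == -1)
			return (NULL);		/* not found or error */
		value = malloc(*value_size);
		if (value == NULL)
			return (NULL);
		if (res == 0) {
			memcpy(value, buf, *value_size);
			return (value);
		}
		/* res == -2: *value_size now holds the required size */
		if (cache_read(entry, key, key_size, value, value_size) == 0)
			return (value);
		free(value);
		return (NULL);
	}
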
==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cachelib.h#10 (text+ko) ====

@@ -76,7 +76,7 @@
 	char	*entry_name;	
 };
 
-/* params, used for most entrues */
+/* params, used for most entries */
 struct common_cache_entry_params
 {
 	/* inherited fields */
@@ -87,7 +87,9 @@
 	size_t	cache_entries_size;
 	
 	size_t	max_elemsize;		/* if 0 then no check is made */
-	size_t	satisf_elemsize;	
+	size_t	satisf_elemsize;	/* if the entry size is exceeded,
+					 * this number of elements will be
+					 * left, others will be deleted */
 	struct timeval	max_lifetime;	/* if 0 then no check is made */
 	enum cache_policy_t policy;	/* policy used for transformations */
 };
@@ -139,6 +141,12 @@
 	HASHTABLE_HEAD(cache_ht_, cache_ht_item_) items;
 	size_t items_size;
 	
+	/* 
+	 * The entry always has a FIFO policy, which is used to eliminate old
+	 * elements (the ones whose lifetime exceeds max_lifetime). Besides,
+	 * the user can specify another policy to be applied when there are
+	 * too many elements in the entry. So policies_size can be 1 or 2.
+	 */
 	struct cache_policy_ ** policies;
 	size_t policies_size;
 	
@@ -156,6 +164,10 @@
 {
 	struct cache_mp_entry_	*parent_entry;
 	
+	/* 
+	 * All items are accumulated in this queue. When the session is
+	 * committed, they will all be copied to the multipart entry.
+	 */
 	TAILQ_HEAD(cache_mp_data_item_head, cache_mp_data_item_) items;
 	size_t	items_size;
 
@@ -177,12 +189,23 @@
 
 	struct mp_cache_entry_params mp_params;
 		
+	/* All opened write sessions */
 	TAILQ_HEAD(write_sessions_head, cache_mp_write_session_) ws_head;
 	size_t	ws_size;
 	
+	/* All opened read sessions */
 	TAILQ_HEAD(read_sessions_head, cache_mp_read_session_) rs_head;
 	size_t	rs_size;
 
+	/* 
+	 * completed_write_session is the committed write session. All read
+	 * sessions use data from it. If the completed_write_session is out of
+	 * date, but still in use by some of the read sessions, the newly
+	 * committed write session is stored in the pending_write_session.
+	 * In such a case, completed_write_session will be substituted with 
+	 * pending_write_session as soon as it is no longer used by any of
+	 * the read sessions.
+	 */
 	struct cache_mp_write_session_	*completed_write_session;
 	struct cache_mp_write_session_	*pending_write_session;
 	struct timeval	creation_time;
@@ -211,6 +234,11 @@
 #define INVALID_CACHE_MP_WRITE_SESSION	(NULL)
 #define INVALID_CACHE_MP_READ_SESSION	(NULL)
 
+/*
+ * NOTE: cache operations are not thread-safe. You must ensure thread-safety
+ * externally, by yourself.
+ */
+
 /* cache initialization/destruction routines */
 extern cache init_cache(struct cache_params const *);
 extern void destroy_cache(cache);
@@ -234,10 +262,7 @@
 extern int cache_mp_read(cache_mp_read_session, char *, size_t *);
 extern void close_cache_mp_read_session(cache_mp_read_session);
 
-/* 
- * transforms the specified cache entry, or all entries if 
- * the entry_name is NULL 
- */
+/* transformation routines */
 extern int transform_cache(cache, enum cache_transformation_t);
 extern int transform_cache_entry(cache_entry, enum cache_transformation_t);
 

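The write-session comments above translate into a fairly rigid usage pattern
on the caller's side. The function below is a sketch of the write side only;
its name and the item variables are made up for the example, while the session
functions and the INVALID_CACHE_MP_WRITE_SESSION value come from this header.

	static void
	publish_items(cache_entry mp_entry, char *item1, size_t item1_size,
		char *item2, size_t item2_size)
	{
		cache_mp_write_session	ws;

		ws = open_cache_mp_write_session(mp_entry);
		if (ws == INVALID_CACHE_MP_WRITE_SESSION)
			return;		/* too many open write sessions */

		if (cache_mp_write(ws, item1, item1_size) != 0 ||
		    cache_mp_write(ws, item2, item2_size) != 0) {
			/* size limit exceeded - drop the partial session */
			abandon_cache_mp_write_session(ws);
			return;
		}
		/* make the accumulated items visible to read sessions */
		close_cache_mp_write_session(ws);
	}
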
==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cacheplcs.c#10 (text+ko) ====

@@ -67,7 +67,12 @@
 static void destroy_cache_queue_policy(struct cache_queue_policy_ *);		
 static struct cache_queue_policy_ *init_cache_queue_policy(void);
 		
-/* queue common policy implementation */
+/*
+ * All cache_queue_policy_XXX functions below will be used to fill
+ * the cache_queue_policy structure. They implement most of the functionality
+ * of the LRU and FIFO policies. LRU and FIFO policies are actually a
+ * cache_queue_policy_ with the cache_update_item function changed.
+ */
 static struct cache_policy_item_ *
 cache_queue_policy_create_item()
 {
@@ -175,6 +180,10 @@
 		cache_queue_policy_head_, entries));
 }
 
+/*
+ * Initializes cache_queue_policy_ by filling the structure with the function
+ * pointers defined above.
+ */
 static struct cache_queue_policy_ *
 init_cache_queue_policy(void)
 {
@@ -222,7 +231,11 @@
 	TRACE_OUT(destroy_cache_queue_policy);
 }
 
-/* fifo policy specific part */
+/*
+ * Makes cache_queue_policy_ behave like a FIFO policy - we don't do anything
+ * when the cache element is updated. So it always stays in its initial
+ * position in the queue - that is exactly the FIFO functionality.
+ */
 static void
 cache_fifo_policy_update_item(struct cache_policy_ *policy, 
 	struct cache_policy_item_ *item)
@@ -257,7 +270,11 @@
 	TRACE_OUT(destroy_cache_fifo_policy);
 }
 
-/* lru policy specific part */
+/*
+ * Makes cache_queue_policy_ behave like an LRU policy. On each update, the
+ * cache element is moved to the end of the queue - so it will be deleted
+ * last. That is exactly the LRU policy functionality.
+ */
 static void
 cache_lru_policy_update_item(struct cache_policy_ *policy, 
 	struct cache_policy_item_ *item)
@@ -298,7 +315,16 @@
 	TRACE_OUT(destroy_cache_lru_policy);
 }
 
-/* lfu policy implementation */
+/*
+ * The LFU (least frequently used) policy implementation differs a lot from
+ * LRU and FIFO (both based on cache_queue_policy_). Almost all cache_policy_
+ * functions are implemented specifically for this policy. The idea of this
+ * policy is to represent the frequency (a real number) as an integer and
+ * use it as an index into an array. Each array element is a list of
+ * elements. For example, if we have a 100-element array for this policy,
+ * the elements with a frequency of 0.1 (calls per second) would be in the
+ * 10th element of the array.
+ */
 static struct cache_policy_item_ *
 cache_lfu_policy_create_item(void)
 {
@@ -317,12 +343,17 @@
 static void
 cache_lfu_policy_destroy_item(struct cache_policy_item_ *item)
 {
+	
 	TRACE_IN(cache_lfu_policy_destroy_item);
 	assert(item != NULL);
 	free(item);
 	TRACE_OUT(cache_lfu_policy_destroy_item);
 }
 
+/*
+ * When an element is placed in the LFU policy queue for the first time,
+ * the maximum frequency is assigned to it.
+ */
 static void
 cache_lfu_policy_add_item(struct cache_policy_ *policy, 
 	struct cache_policy_item_ *item)
@@ -340,6 +371,10 @@
 	TRACE_OUT(cache_lfu_policy_add_item);
 }
 
+/*
+ * On each update the frequency of the element is recalculated and, if it
+ * changed, the element will be moved to another place in the array.
+ */
 static void
 cache_lfu_policy_update_item(struct cache_policy_ *policy, 
 	struct cache_policy_item_ *item)
@@ -352,12 +387,21 @@
 	lfu_policy = (struct cache_lfu_policy_ *)policy;
 	lfu_item = (struct cache_lfu_policy_item_ *)item;
 
+	/* 
+	 * We calculate the square of the request_count to avoid grouping of
+	 * all elements at the start of the array (for example, if the array
+	 * size is 100 and most of its elements have a frequency below 0.01,
+	 * they would all be grouped in the first array position). Other
+	 * techniques should be used here later to ensure that elements are
+	 * distributed equally in the array and not grouped at its beginning.
+	 */
 	if (lfu_item->parent_data.last_request_time.tv_sec != 
 		lfu_item->parent_data.creation_time.tv_sec) {
-		index = ((double)lfu_item->parent_data.request_count / 
+		index = ((double)lfu_item->parent_data.request_count *
+			(double)lfu_item->parent_data.request_count / 
 			(lfu_item->parent_data.last_request_time.tv_sec - 
-				lfu_item->parent_data.creation_time.tv_sec)) *
-				CACHELIB_MAX_FREQUENCY;
+			    lfu_item->parent_data.creation_time.tv_sec + 1)) *
+			    CACHELIB_MAX_FREQUENCY;
 		if (index >= CACHELIB_MAX_FREQUENCY)
 			index = CACHELIB_MAX_FREQUENCY - 1;
 	} else
@@ -481,6 +525,10 @@
 	return ((struct cache_policy_item_ *)lfu_item);
 }
 
+/*
+ * Initializes the cache_policy_ structure by filling it with appropriate
+ * function pointers.
+ */
 struct cache_policy_ *
 init_cache_lfu_policy()
 {

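The frequency-to-index mapping used by the LFU policy above can be written out
as a small standalone helper. The function below only repeats the arithmetic
from the updated code; its name and parameter types are made up for the
example, and the equal-timestamps case is assumed to map to the top bucket,
as the add_item comment suggests.

	#include <time.h>

	#define CACHELIB_MAX_FREQUENCY	100	/* as defined in cacheplcs.h */

	static int
	lfu_index(int request_count, time_t creation, time_t last_request)
	{
		int	index;

		if (last_request == creation)
			return (CACHELIB_MAX_FREQUENCY - 1);
		index = ((double)request_count * (double)request_count /
		    (last_request - creation + 1)) * CACHELIB_MAX_FREQUENCY;
		if (index >= CACHELIB_MAX_FREQUENCY)
			index = CACHELIB_MAX_FREQUENCY - 1;
		return (index);
	}
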
==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/cacheplcs.h#10 (text+ko) ====

@@ -35,6 +35,11 @@
 /* common policy definitions */
 #define CACHELIB_MAX_FREQUENCY 100
 
+/*
+ * cache_policy_item_ represents some abstract cache element in the policy
+ * queue. connected_item points to the corresponding cache_policy_item_ in
+ * another policy queue.
+ */
 struct cache_policy_item_
 {
 	char	*key;
@@ -47,6 +52,10 @@
 	struct cache_policy_item_ *connected_item;
 };
 
+/*
+ * cache_policy_ represents an abstract policy queue. It can be customized by
+ * setting appropriate function pointers.
+ */
 struct cache_policy_
 {
 	struct cache_policy_item_* (*create_item_func)();
@@ -69,6 +78,9 @@
 		struct cache_policy_ *, struct cache_policy_item_ *);
 };
 
+/*
+ * LFU cache policy item "inherited" from cache_policy_item_ structure
+ */
 struct cache_lfu_policy_item_
 {
 	struct cache_policy_item_ parent_data;
@@ -79,18 +91,27 @@
 
 TAILQ_HEAD(cache_lfu_policy_group_, cache_lfu_policy_item_);
 
+/*
+ * LFU policy queue "inherited" from cache_policy_.
+ */
 struct cache_lfu_policy_
 {
 	struct cache_policy_ parent_data;
 	struct cache_lfu_policy_group_ groups[CACHELIB_MAX_FREQUENCY];
 };
 
+/*
+ * LRU and FIFO policies item "inherited" from cache_policy_item_
+ */
 struct cache_queue_policy_item_
 {
 	struct cache_policy_item_ parent_data;
 	TAILQ_ENTRY(cache_queue_policy_item_) entries;
 };
 
+/*
+ * LRU and FIFO policies "inherited" from cache_policy_
+ */
 struct cache_queue_policy_
 {
 	struct cache_policy_ parent_data;

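The "inherited" structures above rely on the usual C idiom of embedding the
parent structure as the first member, so the same address can be viewed either
as the base cache_policy_item_ or as the policy-specific item. A minimal
sketch of that idiom, with hypothetical helper names:

	/* Upcast: a queue policy item starts with its parent_data. */
	static struct cache_policy_item_ *
	queue_item_to_base(struct cache_queue_policy_item_ *item)
	{
		return (&item->parent_data);
	}

	/* Downcast: a generic callback receives a cache_policy_item_ * and
	 * recovers the queue-specific item it is embedded in. */
	static struct cache_queue_policy_item_ *
	base_to_queue_item(struct cache_policy_item_ *item)
	{
		return ((struct cache_queue_policy_item_ *)item);
	}
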
==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/config.c#10 (text+ko) ====

@@ -34,10 +34,9 @@
 #include "debug.h"
 #include "log.h"
 
-#define INITIAL_ENTRIES_CAPACITY	8
-#define DEFAULT_SOCKET_PATH		"/var/run/cached"
-#define DEFAULT_PIDFILE_PATH		"/var/run/cached.pid"
-
+/*
+ * Default entries, which always exist in the configuration
+ */
 const char *c_default_entries[6] = {
 	NSDB_PASSWD, 
 	NSDB_GROUP, 
@@ -169,6 +168,9 @@
 	return (retval);		
 }
 
+/*
+ * Creates a configuration entry and fills it with default values.
+ */
 struct configuration_entry *
 create_def_configuration_entry(const char *name)
 {
@@ -183,16 +185,16 @@
 		sizeof(struct common_cache_entry_params));
 	positive_params.entry_type = CET_COMMON;
 	positive_params.cache_entries_size = DEFAULT_CACHE_HT_SIZE;
-	positive_params.max_elemsize = 2048;
-	positive_params.satisf_elemsize = 1024;
-	positive_params.max_lifetime.tv_sec = 60 * 60 * 12;
+	positive_params.max_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE;
+	positive_params.satisf_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE / 2;
+	positive_params.max_lifetime.tv_sec = DEFAULT_POSITIVE_LIFETIME;
 	positive_params.policy = CPT_LRU;
 		
 	memcpy(&negative_params, &positive_params, 
 		sizeof(struct common_cache_entry_params));
-	negative_params.max_elemsize = 512;
-	negative_params.satisf_elemsize = 256;
-	negative_params.max_lifetime.tv_sec = 60 * 60;
+	negative_params.max_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE;
+	negative_params.satisf_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE / 2;
+	negative_params.max_lifetime.tv_sec = DEFAULT_NEGATIVE_LIFETIME;
 	negative_params.policy = CPT_FIFO;
 			
 	memset(&default_common_timeout, 0, sizeof(struct timeval));
@@ -204,9 +206,9 @@
 	memset(&mp_params, 0,
 		sizeof(struct mp_cache_entry_params));
 	mp_params.entry_type = CET_MULTIPART;
-	mp_params.max_elemsize = 1024 * 8;
-	mp_params.max_sessions = 1024;
-	mp_params.max_lifetime.tv_sec = 60 * 60 * 12;
+	mp_params.max_elemsize = DEFAULT_MULTIPART_ELEMENTS_SIZE;
+	mp_params.max_sessions = DEFAULT_MULITPART_SESSIONS_SIZE;
+	mp_params.max_lifetime.tv_sec = DEFAULT_MULITPART_LIFETIME;
 			
 	res = create_configuration_entry(name, &default_common_timeout, 
 		&default_mp_timeout, &positive_params, &negative_params, 
@@ -306,6 +308,9 @@
 	return ((retval != NULL) ? *retval : NULL);
 }
 
+/* 
+ * The configuration structure uses an rwlock to handle access to its fields.
+ */
 void
 configuration_lock_rdlock(struct configuration *config)
 {
@@ -330,6 +335,11 @@
     TRACE_OUT(configuration_unlock);
 }
 
+/*
+ * Each configuration entry uses 3 mutexes to handle cache operations. They
+ * are acquired and released by the configuration_lock_entry and
+ * configuration_unlock_entry functions.
+ */
 void
 configuration_lock_entry(struct configuration_entry *entry,
 	enum config_entry_lock_type lock_type)
@@ -425,12 +435,12 @@
 	memset(config->pidfile_path, 0, len + 1);
 	memcpy(config->pidfile_path, DEFAULT_PIDFILE_PATH, len);
 	
-	config->socket_mode =  S_IFSOCK | S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP | 
-		S_IROTH | S_IWOTH;
+	config->socket_mode =  S_IFSOCK | S_IRUSR | S_IWUSR | 
+		S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
 	config->force_unlink = 1;
 	
-	config->query_timeout = 8;
-	config->threads_num = 8;
+	config->query_timeout = DEFAULT_QUERY_TIMEOUT;
+	config->threads_num = DEFAULT_THREADS_NUM;
 
 	for (i = 0; i < config->entries_size; ++i)
 		destroy_configuration_entry(config->entries[i]);		

==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/config.h#10 (text+ko) ====

@@ -35,17 +35,46 @@
 #include <unistd.h>
 #include "cachelib.h"
 
+#define DEFAULT_QUERY_TIMEOUT		8
+#define DEFAULT_THREADS_NUM		8
+
 #define DEFAULT_COMMON_ENTRY_TIMEOUT	10
 #define DEFAULT_MP_ENTRY_TIMEOUT	60
 #define DEFAULT_CACHE_HT_SIZE		257
 
+#define INITIAL_ENTRIES_CAPACITY	8
+#define DEFAULT_SOCKET_PATH		"/var/run/cached"
+#define DEFAULT_PIDFILE_PATH		"/var/run/cached.pid"
+
+#define DEFAULT_POSITIVE_ELEMENTS_SIZE	(2048)
+#define DEFAULT_POSITIVE_LIFETIME 	(60 * 60 * 12)
+
+#define DEFAULT_NEGATIVE_ELEMENTS_SIZE	(512)
+#define DEFAULT_NEGATIVE_LIFETIME	(60 * 60)
+
+#define DEFAULT_MULTIPART_ELEMENTS_SIZE	(1024 * 8)
+#define DEFAULT_MULITPART_SESSIONS_SIZE	(1024)
+#define DEFAULT_MULITPART_LIFETIME	(60 * 60 * 12)
+
 extern const char *c_default_entries[6];
 
+/*
+ * Configuration entry represents the details of each cache entry in the
+ * config file (e.g. passwd or group). It is also used to acquire locks
+ * of three different types (for usual read/write caching, for multipart
+ * caching and for caching of negative results) for that cache entry.
+ */
 struct configuration_entry {
 	struct common_cache_entry_params positive_cache_params;
 	struct common_cache_entry_params negative_cache_params;
 	struct mp_cache_entry_params mp_cache_params;
-		
+	
+	/* 
+	 * configuration_entry holds pointers to all of the actual
+	 * cache_entries that are used for it. There is one for positive
+	 * caching, one for negative caching, and several (one per euid/egid
+	 * pair) for multipart caching.
+	 */
 	cache_entry positive_cache_entry;
 	cache_entry negative_cache_entry;
 	
@@ -64,7 +93,9 @@
 	int	enabled;
 };
 
-/* the cached configuration parameters */
+/*
+ * Contains global configuration options and an array of all configuration entries.
+ */
 struct configuration {
 	char	*pidfile_path;
 	char	*socket_path;		

==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/debug.c#10 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/debug.h#10 (text+ko) ====


==== //depot/projects/soc2005/nsswitch_cached/release/cached-0.2/cached/hashtable.h#10 (text+ko) ====


>>> TRUNCATED FOR MAIL (1000 lines) <<<


