
Removing obsolete cached files after cached->nscd renaming.

Approved by:	re (kensmith), brooks (mentor)
Michael Bushkov 2007-10-02 07:51:43 +00:00
parent 59c6813475
commit 1035d0cb65
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=172413
39 changed files with 0 additions and 10193 deletions


@ -1,32 +0,0 @@
#!/bin/sh
#
# $FreeBSD$
#
# PROVIDE: cached
# REQUIRE: DAEMON
# BEFORE: LOGIN
#
# Add the following lines to /etc/rc.conf to enable cached:
#
# cached_enable="YES"
#
# See cached(8) for flags
#
. /etc/rc.subr
name=cached
rcvar=`set_rcvar`
command=/usr/sbin/cached
extra_commands="flush"
flush_cmd="${command} -I all"
cached_enable=${cached_enable:-"NO"}
cached_pidfile=${cached_pidfile:-"/var/run/cached.pid"}
cached_flags=${cached_flags:-""}
load_rc_config $name
run_rc_command "$1"
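# Editorial note, not part of the original script: once cached_enable="YES"
# has been added to /etc/rc.conf, the daemon and the extra "flush" command
# defined above can be driven through this script (assuming it is installed
# as /etc/rc.d/cached), for example:
#
#	/etc/rc.d/cached start
#	/etc/rc.d/cached flush	# runs "cached -I all" via flush_cmd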


@ -1,16 +0,0 @@
# $FreeBSD$
PROG= cached
MAN= cached.conf.5 cached.8
WARNS?= 2
SRCS= agent.c cached.c cachedcli.c cachelib.c cacheplcs.c debug.c log.c \
config.c query.c mp_ws_query.c mp_rs_query.c singletons.c protocol.c \
parser.c
CFLAGS+= -DCONFIG_PATH="\"${PREFIX}/etc/cached.conf\""
DPADD= ${LIBM} ${LIBPTHREAD} ${LIBUTIL}
LDADD= -lm -lpthread -lutil
.PATH: ${.CURDIR}/agents
.include "${.CURDIR}/agents/Makefile.inc"
.include <bsd.prog.mk>


@ -1,127 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <assert.h>
#include <string.h>
#include <stdlib.h>
#include "agent.h"
#include "debug.h"
static int
agent_cmp_func(const void *a1, const void *a2)
{
struct agent const *ap1 = *((struct agent const **)a1);
struct agent const *ap2 = *((struct agent const **)a2);
int res;
res = strcmp(ap1->name, ap2->name);
if (res == 0) {
if (ap1->type == ap2->type)
res = 0;
else if (ap1->type < ap2->type)
res = -1;
else
res = 1;
}
return (res);
}
struct agent_table *
init_agent_table()
{
struct agent_table *retval;
TRACE_IN(init_agent_table);
retval = (struct agent_table *)malloc(sizeof(struct agent_table));
assert(retval != NULL);
memset(retval, 0, sizeof(struct agent_table));
TRACE_OUT(init_agent_table);
return (retval);
}
void
register_agent(struct agent_table *at, struct agent *a)
{
struct agent **new_agents;
size_t new_agents_num;
TRACE_IN(register_agent);
assert(at != NULL);
assert(a != NULL);
new_agents_num = at->agents_num + 1;
new_agents = (struct agent **)malloc(sizeof(struct agent *) *
new_agents_num);
assert(new_agents != NULL);
memcpy(new_agents, at->agents, at->agents_num * sizeof(struct agent *));
new_agents[new_agents_num - 1] = a;
qsort(new_agents, new_agents_num, sizeof(struct agent *),
agent_cmp_func);
free(at->agents);
at->agents = new_agents;
at->agents_num = new_agents_num;
TRACE_OUT(register_agent);
}
struct agent *
find_agent(struct agent_table *at, const char *name, enum agent_type type)
{
struct agent **res;
struct agent model, *model_p;
TRACE_IN(find_agent);
model.name = (char *)name;
model.type = type;
model_p = &model;
res = bsearch(&model_p, at->agents, at->agents_num,
sizeof(struct agent *), agent_cmp_func);
TRACE_OUT(find_agent);
return ( res == NULL ? NULL : *res);
}
void
destroy_agent_table(struct agent_table *at)
{
size_t i;
TRACE_IN(destroy_agent_table);
assert(at != NULL);
for (i = 0; i < at->agents_num; ++i) {
free(at->agents[i]->name);
free(at->agents[i]);
}
free(at->agents);
free(at);
TRACE_OUT(destroy_agent_table);
}


@ -1,72 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_AGENT_H__
#define __CACHED_AGENT_H__
/*
* Agents are used to perform the actual lookups from the caching daemon.
* There are two types of agents: for common requests and for multipart
* requests.
* All agents are stored in the agents table, which is a singleton.
*/
enum agent_type {
COMMON_AGENT = 0,
MULTIPART_AGENT = 1
};
struct agent {
char *name;
enum agent_type type;
};
struct common_agent {
struct agent parent;
int (*lookup_func)(const char *, size_t, char **, size_t *);
};
struct multipart_agent {
struct agent parent;
void *(*mp_init_func)();
int (*mp_lookup_func)(char **, size_t *, void *);
void (*mp_destroy_func)(void *);
};
struct agent_table {
struct agent **agents;
size_t agents_num;
};
extern struct agent_table *init_agent_table();
extern void register_agent(struct agent_table *, struct agent *);
extern struct agent *find_agent(struct agent_table *, const char *,
enum agent_type);
extern void destroy_agent_table(struct agent_table *);
#endif
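/*
 * Editorial illustration, not part of the deleted sources: a minimal sketch
 * of how the agent table API declared above is meant to be used.  The
 * "dummy" agent and its lookup function are hypothetical and exist only for
 * this example; the real agents live under agents/ below.
 */
#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "agent.h"

static int
dummy_lookup_func(const char *key, size_t key_size, char **buffer,
    size_t *buffer_size)
{
	/* A real agent allocates *buffer and marshals its result into it. */
	*buffer = NULL;
	*buffer_size = 0;
	return (0);
}

static int
example_usage(void)
{
	struct agent_table *at;
	struct common_agent *ca;
	struct agent *found;

	at = init_agent_table();

	/*
	 * Describe and register the agent; register_agent() keeps the table
	 * sorted by (name, type) so that find_agent() can bsearch() it.
	 */
	ca = calloc(1, sizeof(struct common_agent));
	assert(ca != NULL);
	ca->parent.name = strdup("dummy");
	assert(ca->parent.name != NULL);
	ca->parent.type = COMMON_AGENT;
	ca->lookup_func = dummy_lookup_func;
	register_agent(at, (struct agent *)ca);

	/* The daemon later looks agents up by name and type. */
	found = find_agent(at, "dummy", COMMON_AGENT);

	/* destroy_agent_table() frees the registered agents and their names. */
	destroy_agent_table(at);
	return (found != NULL ? 0 : -1);
}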


@ -1,3 +0,0 @@
# $FreeBSD$
SRCS += passwd.c group.c services.c


@ -1,262 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/types.h>
#include <assert.h>
#include <nsswitch.h>
#include <grp.h>
#include <string.h>
#include <stdlib.h>
#include "../debug.h"
#include "passwd.h"
static int group_marshal_func(struct group *, char *, size_t *);
static int group_lookup_func(const char *, size_t, char **, size_t *);
static void *group_mp_init_func();
static int group_mp_lookup_func(char **, size_t *, void *);
static void group_mp_destroy_func(void *);
static int
group_marshal_func(struct group *grp, char *buffer, size_t *buffer_size)
{
struct group new_grp;
size_t desired_size, size, mem_size;
char *p, **mem;
TRACE_IN(group_marshal_func);
desired_size = ALIGNBYTES + sizeof(struct group) + sizeof(char *);
if (grp->gr_name != NULL)
desired_size += strlen(grp->gr_name) + 1;
if (grp->gr_passwd != NULL)
desired_size += strlen(grp->gr_passwd) + 1;
if (grp->gr_mem != NULL) {
mem_size = 0;
for (mem = grp->gr_mem; *mem; ++mem) {
desired_size += strlen(*mem) + 1;
++mem_size;
}
desired_size += ALIGNBYTES + (mem_size + 1) * sizeof(char *);
}
if ((desired_size > *buffer_size) || (buffer == NULL)) {
*buffer_size = desired_size;
TRACE_OUT(group_marshal_func);
return (NS_RETURN);
}
memcpy(&new_grp, grp, sizeof(struct group));
memset(buffer, 0, desired_size);
*buffer_size = desired_size;
p = buffer + sizeof(struct group) + sizeof(char *);
memcpy(buffer + sizeof(struct group), &p, sizeof(char *));
p = (char *)ALIGN(p);
if (new_grp.gr_name != NULL) {
size = strlen(new_grp.gr_name);
memcpy(p, new_grp.gr_name, size);
new_grp.gr_name = p;
p += size + 1;
}
if (new_grp.gr_passwd != NULL) {
size = strlen(new_grp.gr_passwd);
memcpy(p, new_grp.gr_passwd, size);
new_grp.gr_passwd = p;
p += size + 1;
}
if (new_grp.gr_mem != NULL) {
p = (char *)ALIGN(p);
memcpy(p, new_grp.gr_mem, sizeof(char *) * mem_size);
new_grp.gr_mem = (char **)p;
p += sizeof(char *) * (mem_size + 1);
for (mem = new_grp.gr_mem; *mem; ++mem) {
size = strlen(*mem);
memcpy(p, *mem, size);
*mem = p;
p += size + 1;
}
}
memcpy(buffer, &new_grp, sizeof(struct group));
TRACE_OUT(group_marshal_func);
return (NS_SUCCESS);
}
static int
group_lookup_func(const char *key, size_t key_size, char **buffer,
size_t *buffer_size)
{
enum nss_lookup_type lookup_type;
char *name;
size_t size;
gid_t gid;
struct group *result;
TRACE_IN(group_lookup_func);
assert(buffer != NULL);
assert(buffer_size != NULL);
if (key_size < sizeof(enum nss_lookup_type)) {
TRACE_OUT(group_lookup_func);
return (NS_UNAVAIL);
}
memcpy(&lookup_type, key, sizeof(enum nss_lookup_type));
switch (lookup_type) {
case nss_lt_name:
size = key_size - sizeof(enum nss_lookup_type) + 1;
name = (char *)malloc(size);
assert(name != NULL);
memset(name, 0, size);
memcpy(name, key + sizeof(enum nss_lookup_type), size - 1);
break;
case nss_lt_id:
if (key_size < sizeof(enum nss_lookup_type) +
sizeof(gid_t)) {
TRACE_OUT(group_lookup_func);
return (NS_UNAVAIL);
}
memcpy(&gid, key + sizeof(enum nss_lookup_type), sizeof(gid_t));
break;
default:
TRACE_OUT(group_lookup_func);
return (NS_UNAVAIL);
}
switch (lookup_type) {
case nss_lt_name:
TRACE_STR(name);
result = getgrnam(name);
free(name);
break;
case nss_lt_id:
result = getgrgid(gid);
break;
default:
/* SHOULD NOT BE REACHED */
break;
}
if (result != NULL) {
group_marshal_func(result, NULL, buffer_size);
*buffer = (char *)malloc(*buffer_size);
assert(*buffer != NULL);
group_marshal_func(result, *buffer, buffer_size);
}
TRACE_OUT(group_lookup_func);
return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
}
static void *
group_mp_init_func()
{
TRACE_IN(group_mp_init_func);
setgrent();
TRACE_OUT(group_mp_init_func);
return (NULL);
}
static int
group_mp_lookup_func(char **buffer, size_t *buffer_size, void *mdata)
{
struct group *result;
TRACE_IN(group_mp_lookup_func);
result = getgrent();
if (result != NULL) {
group_marshal_func(result, NULL, buffer_size);
*buffer = (char *)malloc(*buffer_size);
assert(*buffer != NULL);
group_marshal_func(result, *buffer, buffer_size);
}
TRACE_OUT(group_mp_lookup_func);
return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
}
static void
group_mp_destroy_func(void *mdata)
{
TRACE_IN(group_mp_destroy_func);
TRACE_OUT(group_mp_destroy_func);
}
struct agent *
init_group_agent()
{
struct common_agent *retval;
TRACE_IN(init_group_agent);
retval = (struct common_agent *)malloc(sizeof(struct common_agent));
assert(retval != NULL);
memset(retval, 0, sizeof(struct common_agent));
retval->parent.name = strdup("group");
assert(retval->parent.name != NULL);
retval->parent.type = COMMON_AGENT;
retval->lookup_func = group_lookup_func;
TRACE_OUT(init_group_agent);
return ((struct agent *)retval);
}
struct agent *
init_group_mp_agent()
{
struct multipart_agent *retval;
TRACE_IN(init_group_mp_agent);
retval = (struct multipart_agent *)malloc(
sizeof(struct multipart_agent));
assert(retval != NULL);
memset(retval, 0, sizeof(struct multipart_agent));
retval->parent.name = strdup("group");
retval->parent.type = MULTIPART_AGENT;
retval->mp_init_func = group_mp_init_func;
retval->mp_lookup_func = group_mp_lookup_func;
retval->mp_destroy_func = group_mp_destroy_func;
assert(retval->parent.name != NULL);
TRACE_OUT(init_group_mp_agent);
return ((struct agent *)retval);
}


@ -1,32 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "../agent.h"
extern struct agent *init_group_agent();
extern struct agent *init_group_mp_agent();


@ -1,269 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <assert.h>
#include <nsswitch.h>
#include <pwd.h>
#include <string.h>
#include <stdlib.h>
#include "../debug.h"
#include "passwd.h"
static int passwd_marshal_func(struct passwd *, char *, size_t *);
static int passwd_lookup_func(const char *, size_t, char **, size_t *);
static void *passwd_mp_init_func();
static int passwd_mp_lookup_func(char **, size_t *, void *);
static void passwd_mp_destroy_func(void *mdata);
static int
passwd_marshal_func(struct passwd *pwd, char *buffer, size_t *buffer_size)
{
char *p;
struct passwd new_pwd;
size_t desired_size, size;
TRACE_IN(passwd_marshal_func);
desired_size = sizeof(struct passwd) + sizeof(char *) +
strlen(pwd->pw_name) + 1;
if (pwd->pw_passwd != NULL)
desired_size += strlen(pwd->pw_passwd) + 1;
if (pwd->pw_class != NULL)
desired_size += strlen(pwd->pw_class) + 1;
if (pwd->pw_gecos != NULL)
desired_size += strlen(pwd->pw_gecos) + 1;
if (pwd->pw_dir != NULL)
desired_size += strlen(pwd->pw_dir) + 1;
if (pwd->pw_shell != NULL)
desired_size += strlen(pwd->pw_shell) + 1;
if ((*buffer_size < desired_size) || (buffer == NULL)) {
*buffer_size = desired_size;
TRACE_OUT(passwd_marshal_func);
return (NS_RETURN);
}
memcpy(&new_pwd, pwd, sizeof(struct passwd));
memset(buffer, 0, desired_size);
*buffer_size = desired_size;
p = buffer + sizeof(struct passwd) + sizeof(char *);
memcpy(buffer + sizeof(struct passwd), &p, sizeof(char *));
if (new_pwd.pw_name != NULL) {
size = strlen(new_pwd.pw_name);
memcpy(p, new_pwd.pw_name, size);
new_pwd.pw_name = p;
p += size + 1;
}
if (new_pwd.pw_passwd != NULL) {
size = strlen(new_pwd.pw_passwd);
memcpy(p, new_pwd.pw_passwd, size);
new_pwd.pw_passwd = p;
p += size + 1;
}
if (new_pwd.pw_class != NULL) {
size = strlen(new_pwd.pw_class);
memcpy(p, new_pwd.pw_class, size);
new_pwd.pw_class = p;
p += size + 1;
}
if (new_pwd.pw_gecos != NULL) {
size = strlen(new_pwd.pw_gecos);
memcpy(p, new_pwd.pw_gecos, size);
new_pwd.pw_gecos = p;
p += size + 1;
}
if (new_pwd.pw_dir != NULL) {
size = strlen(new_pwd.pw_dir);
memcpy(p, new_pwd.pw_dir, size);
new_pwd.pw_dir = p;
p += size + 1;
}
if (new_pwd.pw_shell != NULL) {
size = strlen(new_pwd.pw_shell);
memcpy(p, new_pwd.pw_shell, size);
new_pwd.pw_shell = p;
p += size + 1;
}
memcpy(buffer, &new_pwd, sizeof(struct passwd));
TRACE_OUT(passwd_marshal_func);
return (NS_SUCCESS);
}
static int
passwd_lookup_func(const char *key, size_t key_size, char **buffer,
size_t *buffer_size)
{
enum nss_lookup_type lookup_type;
char *login;
size_t size;
uid_t uid;
struct passwd *result;
TRACE_IN(passwd_lookup_func);
assert(buffer != NULL);
assert(buffer_size != NULL);
if (key_size < sizeof(enum nss_lookup_type)) {
TRACE_OUT(passwd_lookup_func);
return (NS_UNAVAIL);
}
memcpy(&lookup_type, key, sizeof(enum nss_lookup_type));
switch (lookup_type) {
case nss_lt_name:
size = key_size - sizeof(enum nss_lookup_type) + 1;
login = (char *)malloc(size);
assert(login != NULL);
memset(login, 0, size);
memcpy(login, key + sizeof(enum nss_lookup_type), size - 1);
break;
case nss_lt_id:
if (key_size < sizeof(enum nss_lookup_type) +
sizeof(uid_t)) {
TRACE_OUT(passwd_lookup_func);
return (NS_UNAVAIL);
}
memcpy(&uid, key + sizeof(enum nss_lookup_type), sizeof(uid_t));
break;
default:
TRACE_OUT(passwd_lookup_func);
return (NS_UNAVAIL);
}
switch (lookup_type) {
case nss_lt_name:
result = getpwnam(login);
free(login);
break;
case nss_lt_id:
result = getpwuid(uid);
break;
default:
/* SHOULD NOT BE REACHED */
break;
}
if (result != NULL) {
passwd_marshal_func(result, NULL, buffer_size);
*buffer = (char *)malloc(*buffer_size);
assert(*buffer != NULL);
passwd_marshal_func(result, *buffer, buffer_size);
}
TRACE_OUT(passwd_lookup_func);
return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
}
static void *
passwd_mp_init_func()
{
TRACE_IN(passwd_mp_init_func);
setpwent();
TRACE_OUT(passwd_mp_init_func);
return (NULL);
}
static int
passwd_mp_lookup_func(char **buffer, size_t *buffer_size, void *mdata)
{
struct passwd *result;
TRACE_IN(passwd_mp_lookup_func);
result = getpwent();
if (result != NULL) {
passwd_marshal_func(result, NULL, buffer_size);
*buffer = (char *)malloc(*buffer_size);
assert(*buffer != NULL);
passwd_marshal_func(result, *buffer, buffer_size);
}
TRACE_OUT(passwd_mp_lookup_func);
return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
}
static void
passwd_mp_destroy_func(void *mdata)
{
TRACE_IN(passwd_mp_destroy_func);
TRACE_OUT(passwd_mp_destroy_func);
}
struct agent *
init_passwd_agent()
{
struct common_agent *retval;
TRACE_IN(init_passwd_agent);
retval = (struct common_agent *)malloc(sizeof(struct common_agent));
assert(retval != NULL);
memset(retval, 0, sizeof(struct common_agent));
retval->parent.name = strdup("passwd");
assert(retval->parent.name != NULL);
retval->parent.type = COMMON_AGENT;
retval->lookup_func = passwd_lookup_func;
TRACE_OUT(init_passwd_agent);
return ((struct agent *)retval);
}
struct agent *
init_passwd_mp_agent()
{
struct multipart_agent *retval;
TRACE_IN(init_passwd_mp_agent);
retval = (struct multipart_agent *)malloc(
sizeof(struct multipart_agent));
assert(retval != NULL);
memset(retval, 0, sizeof(struct multipart_agent));
retval->parent.name = strdup("passwd");
retval->parent.type = MULTIPART_AGENT;
retval->mp_init_func = passwd_mp_init_func;
retval->mp_lookup_func = passwd_mp_lookup_func;
retval->mp_destroy_func = passwd_mp_destroy_func;
assert(retval->parent.name != NULL);
TRACE_OUT(init_passwd_mp_agent);
return ((struct agent *)retval);
}


@ -1,32 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "../agent.h"
extern struct agent *init_passwd_agent();
extern struct agent *init_passwd_mp_agent();


@ -1,284 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/types.h>
#include <assert.h>
#include <nsswitch.h>
#include <netdb.h>
#include <string.h>
#include <stdlib.h>
#include "../debug.h"
#include "services.h"
static int services_marshal_func(struct servent *, char *, size_t *);
static int services_lookup_func(const char *, size_t, char **, size_t *);
static void *services_mp_init_func();
static int services_mp_lookup_func(char **, size_t *, void *);
static void services_mp_destroy_func(void *);
static int
services_marshal_func(struct servent *serv, char *buffer, size_t *buffer_size)
{
struct servent new_serv;
size_t desired_size;
char **alias;
char *p;
size_t size;
size_t aliases_size;
TRACE_IN(services_marshal_func);
desired_size = ALIGNBYTES + sizeof(struct servent) + sizeof(char *);
if (serv->s_name != NULL)
desired_size += strlen(serv->s_name) + 1;
if (serv->s_proto != NULL)
desired_size += strlen(serv->s_proto) + 1;
aliases_size = 0;
if (serv->s_aliases != NULL) {
for (alias = serv->s_aliases; *alias; ++alias) {
desired_size += strlen(*alias) + 1;
++aliases_size;
}
desired_size += ALIGNBYTES + sizeof(char *) *
(aliases_size + 1);
}
if ((*buffer_size < desired_size) || (buffer == NULL)) {
*buffer_size = desired_size;
TRACE_OUT(services_marshal_func);
return (NS_RETURN);
}
memcpy(&new_serv, serv, sizeof(struct servent));
memset(buffer, 0, desired_size);
*buffer_size = desired_size;
p = buffer + sizeof(struct servent) + sizeof(char *);
memcpy(buffer + sizeof(struct servent), &p, sizeof(char *));
p = (char *)ALIGN(p);
if (new_serv.s_name != NULL) {
size = strlen(new_serv.s_name);
memcpy(p, new_serv.s_name, size);
new_serv.s_name = p;
p += size + 1;
}
if (new_serv.s_proto != NULL) {
size = strlen(new_serv.s_proto);
memcpy(p, new_serv.s_proto, size);
new_serv.s_proto = p;
p += size + 1;
}
if (new_serv.s_aliases != NULL) {
p = (char *)ALIGN(p);
memcpy(p, new_serv.s_aliases, sizeof(char *) * aliases_size);
new_serv.s_aliases = (char **)p;
p += sizeof(char *) * (aliases_size + 1);
for (alias = new_serv.s_aliases; *alias; ++alias) {
size = strlen(*alias);
memcpy(p, *alias, size);
*alias = p;
p += size + 1;
}
}
memcpy(buffer, &new_serv, sizeof(struct servent));
TRACE_OUT(services_marshal_func);
return (NS_SUCCESS);
}
static int
services_lookup_func(const char *key, size_t key_size, char **buffer,
size_t *buffer_size)
{
enum nss_lookup_type lookup_type;
char *name = NULL;
char *proto = NULL;
size_t size, size2;
int port;
struct servent *result;
TRACE_IN(services_lookup_func);
assert(buffer != NULL);
assert(buffer_size != NULL);
if (key_size < sizeof(enum nss_lookup_type)) {
TRACE_OUT(services_lookup_func);
return (NS_UNAVAIL);
}
memcpy(&lookup_type, key, sizeof(enum nss_lookup_type));
switch (lookup_type) {
case nss_lt_name:
size = key_size - sizeof(enum nss_lookup_type);
name = (char *)malloc(size + 1);
assert(name != NULL);
memset(name, 0, size + 1);
memcpy(name, key + sizeof(enum nss_lookup_type), size);
size2 = strlen(name) + 1;
if (size2 < size)
proto = name + size2;
else
proto = NULL;
break;
case nss_lt_id:
if (key_size < sizeof(enum nss_lookup_type) +
sizeof(int)) {
TRACE_OUT(services_lookup_func);
return (NS_UNAVAIL);
}
memcpy(&port, key + sizeof(enum nss_lookup_type),
sizeof(int));
size = key_size - sizeof(enum nss_lookup_type) - sizeof(int);
if (size > 0) {
proto = (char *)malloc(size + 1);
assert(proto != NULL);
memset(proto, 0, size + 1);
memcpy(proto, key + sizeof(enum nss_lookup_type) +
sizeof(int), size);
}
break;
default:
TRACE_OUT(services_lookup_func);
return (NS_UNAVAIL);
}
switch (lookup_type) {
case nss_lt_name:
result = getservbyname(name, proto);
free(name);
break;
case nss_lt_id:
result = getservbyport(port, proto);
free(proto);
break;
default:
/* SHOULD NOT BE REACHED */
break;
}
if (result != NULL) {
services_marshal_func(result, NULL, buffer_size);
*buffer = (char *)malloc(*buffer_size);
assert(*buffer != NULL);
services_marshal_func(result, *buffer, buffer_size);
}
TRACE_OUT(services_lookup_func);
return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
}
static void *
services_mp_init_func()
{
TRACE_IN(services_mp_init_func);
setservent(0);
TRACE_OUT(services_mp_init_func);
return (NULL);
}
static int
services_mp_lookup_func(char **buffer, size_t *buffer_size, void *mdata)
{
struct servent *result;
TRACE_IN(services_mp_lookup_func);
result = getservent();
if (result != NULL) {
services_marshal_func(result, NULL, buffer_size);
*buffer = (char *)malloc(*buffer_size);
assert(*buffer != NULL);
services_marshal_func(result, *buffer, buffer_size);
}
TRACE_OUT(services_mp_lookup_func);
return (result == NULL ? NS_NOTFOUND : NS_SUCCESS);
}
static void
services_mp_destroy_func(void *mdata)
{
TRACE_IN(services_mp_destroy_func);
TRACE_OUT(services_mp_destroy_func);
}
struct agent *
init_services_agent()
{
struct common_agent *retval;
TRACE_IN(init_services_agent);
retval = (struct common_agent *)malloc(sizeof(struct common_agent));
assert(retval != NULL);
memset(retval, 0, sizeof(struct common_agent));
retval->parent.name = strdup("services");
assert(retval->parent.name != NULL);
retval->parent.type = COMMON_AGENT;
retval->lookup_func = services_lookup_func;
TRACE_OUT(init_services_agent);
return ((struct agent *)retval);
}
struct agent *
init_services_mp_agent()
{
struct multipart_agent *retval;
TRACE_IN(init_services_mp_agent);
retval = (struct multipart_agent *)malloc(
sizeof(struct multipart_agent));
assert(retval != NULL);
memset(retval, 0, sizeof(struct multipart_agent));
retval->parent.name = strdup("services");
retval->parent.type = MULTIPART_AGENT;
retval->mp_init_func = services_mp_init_func;
retval->mp_lookup_func = services_mp_lookup_func;
retval->mp_destroy_func = services_mp_destroy_func;
assert(retval->parent.name != NULL);
TRACE_OUT(init_services_mp_agent);
return ((struct agent *)retval);
}


@ -1,32 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#include "../agent.h"
extern struct agent *init_services_agent();
extern struct agent *init_services_mp_agent();


@ -1,165 +0,0 @@
.\" Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd October 20, 2005
.Dt CACHED 8
.Os
.Sh NAME
.Nm cached
.Nd "caching server daemon"
.Sh SYNOPSIS
.Nm
.Op Fl dnst
.Op Fl i Ar cachename
.Op Fl I Ar cachename
.Sh DESCRIPTION
The
.Nm
utility
is the system caching daemon.
It can cache almost all types of data and is primarily intended to be used
with the
.Nm nsswitch
subsystem.
The cache is per-user.
This means that each user can only work with the data that they
themselves cached, and cannot poison the cache of other users.
The
.Nm
utility supports two types of caching:
.Bl -tag -width ".Sy Type"
.It Sy Type
.Sy Description
.It Common caching
Each cached element is a key+value pair.
This type of caching supports policies which are applied when the maximum
number of cached elements is exceeded.
Three policies are available:
.Cm FIFO
(first in - first out),
.Cm LRU
(least recently used) and
.Cm LFU
(least frequently used).
This type of caching is used with the
.Fn getXXXbyname
family of functions.
.It Multipart caching
Each cached element is a part of a sequence of elements.
This type of caching is intended to be used with the
.Fn getXXXent
family of functions.
.El
.Pp
The
.Nm
utility is able not only to cache elements, but also to perform the actual nsswitch
lookups by itself.
To enable this feature, use the
.Va perform-actual-lookups
parameter in
.Xr cached.conf 5 .
.Pp
The
.Nm
utility recognizes the following runtime options:
.Bl -tag -width indent
.\" .It Fl d
.\" XXX Document me!
.It Fl n
Do not daemonize;
.Nm
will not fork or disconnect itself from the terminal.
.It Fl s
Single-threaded mode.
Forces using only one thread for all processing purposes (it overrides
the
.Va threads
parameter in the
.Xr cached.conf 5
file).
.It Fl t
Trace mode.
All trace messages will be written to stdout.
This mode is usually used together with the
.Fl n
and
.Fl s
flags for debugging purposes.
.It Fl i Ar cachename
Invalidates personal cache.
When specified,
.Nm
acts as the administration tool.
It asks the already running
.Nm
to invalidate the specified part of the cache of the
calling user.
For example, sometimes you may want to invalidate your
.Dq Li hosts
cache.
You can specify
.Dq Li all
as the
.Ar cachename
to invalidate your personal cache as a whole.
You cannot use this option for the
.Ar cachename
for which the
.Va perform-actual-lookups
option is enabled.
.It Fl I Ar cachename
Invalidates the cache for every user.
When specified,
.Nm
acts as the administration tool.
It asks the already
running
.Nm
to invalidate the specified part of the cache for
every user.
You can specify
.Dq Li all
as the
.Ar cachename
to invalidate the whole cache.
Only root can use this option.
.El
.Sh FILES
.Bl -tag -width ".Pa /etc/cached.conf" -compact
.It Pa /etc/cached.conf
The default configuration file.
.El
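.\" The EXAMPLES section below is an editorial illustration added to this
.\" copy; it was not part of the original manual page.
.Sh EXAMPLES
Invalidate the calling user's
.Dq Li hosts
cache:
.Pp
.Dl cached -i hosts
.Pp
Invalidate the whole cache for every user (only root may do this; the
.Dq Li flush
command of the rc.d script runs the same thing):
.Pp
.Dl cached -I all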
.Sh SEE ALSO
.Xr nsdispatch 3 ,
.Xr cached.conf 5 ,
.Xr nsswitch.conf 5
.Sh AUTHORS
.An Michael Bushkov Aq bushman@rsu.ru
.Sh BUGS
Please send bug reports and suggestions to
.Aq bushman@rsu.ru .


@ -1,885 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/event.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/param.h>
#include <sys/un.h>
#include <assert.h>
#include <err.h>
#include <errno.h>
#include <fcntl.h>
#include <libutil.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "agents/passwd.h"
#include "agents/group.h"
#include "agents/services.h"
#include "cachedcli.h"
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "parser.h"
#include "query.h"
#include "singletons.h"
#ifndef CONFIG_PATH
#define CONFIG_PATH "/etc/cached.conf"
#endif
#define DEFAULT_CONFIG_PATH "cached.conf"
#define MAX_SOCKET_IO_SIZE 4096
struct processing_thread_args {
cache the_cache;
struct configuration *the_configuration;
struct runtime_env *the_runtime_env;
};
static void accept_connection(struct kevent *, struct runtime_env *,
struct configuration *);
static void destroy_cache_(cache);
static void destroy_runtime_env(struct runtime_env *);
static cache init_cache_(struct configuration *);
static struct runtime_env *init_runtime_env(struct configuration *);
static void print_version_info(void);
static void processing_loop(cache, struct runtime_env *,
struct configuration *);
static void process_socket_event(struct kevent *, struct runtime_env *,
struct configuration *);
static void process_timer_event(struct kevent *, struct runtime_env *,
struct configuration *);
static void *processing_thread(void *);
static void usage(void);
void get_time_func(struct timeval *);
static void
print_version_info(void)
{
TRACE_IN(print_version_info);
printf("cached v0.2 (20 Oct 2005)\nwas developed during SoC 2005\n");
TRACE_OUT(print_version_info);
}
static void
usage(void)
{
fprintf(stderr,
"usage: cached [-dnst] [-i cachename] [-I cachename]\n");
exit(1);
}
static cache
init_cache_(struct configuration *config)
{
struct cache_params params;
cache retval;
struct configuration_entry *config_entry;
size_t size, i;
int res;
TRACE_IN(init_cache_);
memset(&params, 0, sizeof(struct cache_params));
params.get_time_func = get_time_func;
retval = init_cache(&params);
size = configuration_get_entries_size(config);
for (i = 0; i < size; ++i) {
config_entry = configuration_get_entry(config, i);
/*
* We should register common entries now - multipart entries
* would be registered automatically during the queries.
*/
res = register_cache_entry(retval, (struct cache_entry_params *)
&config_entry->positive_cache_params);
config_entry->positive_cache_entry = find_cache_entry(retval,
config_entry->positive_cache_params.entry_name);
assert(config_entry->positive_cache_entry !=
INVALID_CACHE_ENTRY);
res = register_cache_entry(retval, (struct cache_entry_params *)
&config_entry->negative_cache_params);
config_entry->negative_cache_entry = find_cache_entry(retval,
config_entry->negative_cache_params.entry_name);
assert(config_entry->negative_cache_entry !=
INVALID_CACHE_ENTRY);
}
LOG_MSG_2("cache", "cache was successfully initialized");
TRACE_OUT(init_cache_);
return (retval);
}
static void
destroy_cache_(cache the_cache)
{
TRACE_IN(destroy_cache_);
destroy_cache(the_cache);
TRACE_OUT(destroy_cache_);
}
/*
* The socket and the kqueue are prepared here. We have one global queue for
* both socket and timer events.
*/
static struct runtime_env *
init_runtime_env(struct configuration *config)
{
int serv_addr_len;
struct sockaddr_un serv_addr;
struct kevent eventlist;
struct timespec timeout;
struct runtime_env *retval;
TRACE_IN(init_runtime_env);
retval = (struct runtime_env *)malloc(sizeof(struct runtime_env));
assert(retval != NULL);
memset(retval, 0, sizeof(struct runtime_env));
retval->sockfd = socket(PF_LOCAL, SOCK_STREAM, 0);
if (config->force_unlink == 1)
unlink(config->socket_path);
memset(&serv_addr, 0, sizeof(struct sockaddr_un));
serv_addr.sun_family = PF_LOCAL;
strncpy(serv_addr.sun_path, config->socket_path,
sizeof(serv_addr.sun_path));
serv_addr_len = sizeof(serv_addr.sun_family) +
strlen(serv_addr.sun_path) + 1;
if (bind(retval->sockfd, (struct sockaddr *)&serv_addr,
serv_addr_len) == -1) {
close(retval->sockfd);
free(retval);
LOG_ERR_2("runtime environment", "can't bind socket to path: "
"%s", config->socket_path);
TRACE_OUT(init_runtime_env);
return (NULL);
}
LOG_MSG_2("runtime environment", "using socket %s",
config->socket_path);
/*
* Here we mark the socket as non-blocking and set its backlog to the
* maximum value.
*/
chmod(config->socket_path, config->socket_mode);
listen(retval->sockfd, -1);
fcntl(retval->sockfd, F_SETFL, O_NONBLOCK);
retval->queue = kqueue();
assert(retval->queue != -1);
EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD | EV_ONESHOT,
0, 0, 0);
memset(&timeout, 0, sizeof(struct timespec));
kevent(retval->queue, &eventlist, 1, NULL, 0, &timeout);
LOG_MSG_2("runtime environment", "successfully initialized");
TRACE_OUT(init_runtime_env);
return (retval);
}
static void
destroy_runtime_env(struct runtime_env *env)
{
TRACE_IN(destroy_runtime_env);
close(env->queue);
close(env->sockfd);
free(env);
TRACE_OUT(destroy_runtime_env);
}
static void
accept_connection(struct kevent *event_data, struct runtime_env *env,
struct configuration *config)
{
struct kevent eventlist[2];
struct timespec timeout;
struct query_state *qstate;
int fd;
int res;
uid_t euid;
gid_t egid;
TRACE_IN(accept_connection);
fd = accept(event_data->ident, NULL, NULL);
if (fd == -1) {
LOG_ERR_2("accept_connection", "error %d during accept()",
errno);
TRACE_OUT(accept_connection);
return;
}
if (getpeereid(fd, &euid, &egid) != 0) {
LOG_ERR_2("accept_connection", "error %d during getpeereid()",
errno);
TRACE_OUT(accept_connection);
return;
}
qstate = init_query_state(fd, sizeof(int), euid, egid);
if (qstate == NULL) {
LOG_ERR_2("accept_connection", "can't init query_state");
TRACE_OUT(accept_connection);
return;
}
memset(&timeout, 0, sizeof(struct timespec));
EV_SET(&eventlist[0], fd, EVFILT_TIMER, EV_ADD | EV_ONESHOT,
0, qstate->timeout.tv_sec * 1000, qstate);
EV_SET(&eventlist[1], fd, EVFILT_READ, EV_ADD | EV_ONESHOT,
NOTE_LOWAT, qstate->kevent_watermark, qstate);
res = kevent(env->queue, eventlist, 2, NULL, 0, &timeout);
if (res < 0)
LOG_ERR_2("accept_connection", "kevent error");
TRACE_OUT(accept_connection);
}
static void
process_socket_event(struct kevent *event_data, struct runtime_env *env,
struct configuration *config)
{
struct kevent eventlist[2];
struct timeval query_timeout;
struct timespec kevent_timeout;
int nevents;
int eof_res, res;
ssize_t io_res;
struct query_state *qstate;
TRACE_IN(process_socket_event);
eof_res = event_data->flags & EV_EOF ? 1 : 0;
res = 0;
memset(&kevent_timeout, 0, sizeof(struct timespec));
EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER, EV_DELETE,
0, 0, NULL);
nevents = kevent(env->queue, eventlist, 1, NULL, 0, &kevent_timeout);
if (nevents == -1) {
if (errno == ENOENT) {
/* the timer is already handling this event */
TRACE_OUT(process_socket_event);
return;
} else {
/* some other error happened */
LOG_ERR_2("process_socket_event", "kevent error, errno"
" is %d", errno);
TRACE_OUT(process_socket_event);
return;
}
}
qstate = (struct query_state *)event_data->udata;
/*
* If the buffer that is to be sent/received is too large,
* we send it implicitly, by using the query_io_buffer_read and
* query_io_buffer_write functions in the query_state. These functions
* use a temporary buffer, which is later sent/received in parts.
* The code below implements buffer splitting/merging for send/receive
* operations. It also does the actual socket IO operations.
*/
if (((qstate->use_alternate_io == 0) &&
(qstate->kevent_watermark <= event_data->data)) ||
((qstate->use_alternate_io != 0) &&
(qstate->io_buffer_watermark <= event_data->data))) {
if (qstate->use_alternate_io != 0) {
switch (qstate->io_buffer_filter) {
case EVFILT_READ:
io_res = query_socket_read(qstate,
qstate->io_buffer_p,
qstate->io_buffer_watermark);
if (io_res < 0) {
qstate->use_alternate_io = 0;
qstate->process_func = NULL;
} else {
qstate->io_buffer_p += io_res;
if (qstate->io_buffer_p ==
qstate->io_buffer +
qstate->io_buffer_size) {
qstate->io_buffer_p =
qstate->io_buffer;
qstate->use_alternate_io = 0;
}
}
break;
default:
break;
}
}
if (qstate->use_alternate_io == 0) {
do {
res = qstate->process_func(qstate);
} while ((qstate->kevent_watermark == 0) &&
(qstate->process_func != NULL) &&
(res == 0));
if (res != 0)
qstate->process_func = NULL;
}
if ((qstate->use_alternate_io != 0) &&
(qstate->io_buffer_filter == EVFILT_WRITE)) {
io_res = query_socket_write(qstate, qstate->io_buffer_p,
qstate->io_buffer_watermark);
if (io_res < 0) {
qstate->use_alternate_io = 0;
qstate->process_func = NULL;
} else
qstate->io_buffer_p += io_res;
}
} else {
/* assuming that socket was closed */
qstate->process_func = NULL;
qstate->use_alternate_io = 0;
}
if (((qstate->process_func == NULL) &&
(qstate->use_alternate_io == 0)) ||
(eof_res != 0) || (res != 0)) {
destroy_query_state(qstate);
close(event_data->ident);
TRACE_OUT(process_socket_event);
return;
}
/* updating the query_state lifetime variable */
get_time_func(&query_timeout);
query_timeout.tv_usec = 0;
query_timeout.tv_sec -= qstate->creation_time.tv_sec;
if (query_timeout.tv_sec > qstate->timeout.tv_sec)
query_timeout.tv_sec = 0;
else
query_timeout.tv_sec = qstate->timeout.tv_sec -
query_timeout.tv_sec;
if ((qstate->use_alternate_io != 0) && (qstate->io_buffer_p ==
qstate->io_buffer + qstate->io_buffer_size))
qstate->use_alternate_io = 0;
if (qstate->use_alternate_io == 0) {
/*
* If we must send/receive the large block of data,
* we should prepare the query_state's io_XXX fields.
* We should also substitute its write_func and read_func
* with the query_io_buffer_write and query_io_buffer_read,
* which will allow us to implicitly send/receive this large
* buffer later (in the subsequent calls to the
* process_socket_event).
*/
if (qstate->kevent_watermark > MAX_SOCKET_IO_SIZE) {
if (qstate->io_buffer != NULL)
free(qstate->io_buffer);
qstate->io_buffer = (char *)malloc(
qstate->kevent_watermark);
assert(qstate->io_buffer != NULL);
memset(qstate->io_buffer, 0, qstate->kevent_watermark);
qstate->io_buffer_p = qstate->io_buffer;
qstate->io_buffer_size = qstate->kevent_watermark;
qstate->io_buffer_filter = qstate->kevent_filter;
qstate->write_func = query_io_buffer_write;
qstate->read_func = query_io_buffer_read;
if (qstate->kevent_filter == EVFILT_READ)
qstate->use_alternate_io = 1;
qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
EV_SET(&eventlist[1], event_data->ident,
qstate->kevent_filter, EV_ADD | EV_ONESHOT,
NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
} else {
EV_SET(&eventlist[1], event_data->ident,
qstate->kevent_filter, EV_ADD | EV_ONESHOT,
NOTE_LOWAT, qstate->kevent_watermark, qstate);
}
} else {
if (qstate->io_buffer + qstate->io_buffer_size -
qstate->io_buffer_p <
MAX_SOCKET_IO_SIZE) {
qstate->io_buffer_watermark = qstate->io_buffer +
qstate->io_buffer_size - qstate->io_buffer_p;
EV_SET(&eventlist[1], event_data->ident,
qstate->io_buffer_filter,
EV_ADD | EV_ONESHOT, NOTE_LOWAT,
qstate->io_buffer_watermark,
qstate);
} else {
qstate->io_buffer_watermark = MAX_SOCKET_IO_SIZE;
EV_SET(&eventlist[1], event_data->ident,
qstate->io_buffer_filter, EV_ADD | EV_ONESHOT,
NOTE_LOWAT, MAX_SOCKET_IO_SIZE, qstate);
}
}
EV_SET(&eventlist[0], event_data->ident, EVFILT_TIMER,
EV_ADD | EV_ONESHOT, 0, query_timeout.tv_sec * 1000, qstate);
kevent(env->queue, eventlist, 2, NULL, 0, &kevent_timeout);
TRACE_OUT(process_socket_event);
}
/*
* This routine is called if a timer event has been signaled in the kqueue. It
* just closes the socket and destroys the query_state.
*/
static void
process_timer_event(struct kevent *event_data, struct runtime_env *env,
struct configuration *config)
{
struct query_state *qstate;
TRACE_IN(process_timer_event);
qstate = (struct query_state *)event_data->udata;
destroy_query_state(qstate);
close(event_data->ident);
TRACE_OUT(process_timer_event);
}
/*
* The processing loop is the basic processing routine that forms the body of
* each processing thread.
*/
static void
processing_loop(cache the_cache, struct runtime_env *env,
struct configuration *config)
{
struct timespec timeout;
const int eventlist_size = 1;
struct kevent eventlist[eventlist_size];
int nevents, i;
TRACE_MSG("=> processing_loop");
memset(&timeout, 0, sizeof(struct timespec));
memset(&eventlist, 0, sizeof(struct kevent) * eventlist_size);
for (;;) {
nevents = kevent(env->queue, NULL, 0, eventlist,
eventlist_size, NULL);
/*
* we can only receive 1 event on success
*/
if (nevents == 1) {
struct kevent *event_data;
event_data = &eventlist[0];
if (event_data->ident == env->sockfd) {
for (i = 0; i < event_data->data; ++i)
accept_connection(event_data, env, config);
EV_SET(eventlist, s_runtime_env->sockfd,
EVFILT_READ, EV_ADD | EV_ONESHOT,
0, 0, 0);
memset(&timeout, 0,
sizeof(struct timespec));
kevent(s_runtime_env->queue, eventlist,
1, NULL, 0, &timeout);
} else {
switch (event_data->filter) {
case EVFILT_READ:
case EVFILT_WRITE:
process_socket_event(event_data,
env, config);
break;
case EVFILT_TIMER:
process_timer_event(event_data,
env, config);
break;
default:
break;
}
}
} else {
/* this branch shouldn't be currently executed */
}
}
TRACE_MSG("<= processing_loop");
}
/*
* A wrapper around the processing loop function. It sets the thread signal mask
* to avoid SIGPIPE signals (which can happen if the client works incorrectly).
*/
static void *
processing_thread(void *data)
{
struct processing_thread_args *args;
sigset_t new;
TRACE_MSG("=> processing_thread");
args = (struct processing_thread_args *)data;
sigemptyset(&new);
sigaddset(&new, SIGPIPE);
if (pthread_sigmask(SIG_BLOCK, &new, NULL) != 0)
LOG_ERR_1("processing thread",
"thread can't block the SIGPIPE signal");
processing_loop(args->the_cache, args->the_runtime_env,
args->the_configuration);
free(args);
TRACE_MSG("<= processing_thread");
return (NULL);
}
void
get_time_func(struct timeval *time)
{
struct timespec res;
memset(&res, 0, sizeof(struct timespec));
clock_gettime(CLOCK_MONOTONIC, &res);
time->tv_sec = res.tv_sec;
time->tv_usec = 0;
}
/*
* The idea of _nss_cache_cycle_prevention_function is that nsdispatch will
* search for this symbol in the executable. This symbol is an attribute of
* the caching daemon itself. So, if it exists, nsdispatch won't try to
* connect to the caching daemon and will just ignore the 'cache' source in
* nsswitch.conf. This mechanism helps to avoid cycles when the daemon
* performs lookups on its own behalf.
*/
void
_nss_cache_cycle_prevention_function(void)
{
}
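/*
 * Editorial illustration, not part of the original file: a minimal sketch of
 * the kind of symbol-presence check described in the comment above, done
 * with dlsym(3).  It only illustrates the idea and is not the actual
 * nsdispatch code.
 */
#include <dlfcn.h>

static int
running_inside_caching_daemon(void)
{

	return (dlsym(RTLD_DEFAULT,
	    "_nss_cache_cycle_prevention_function") != NULL);
}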
int
main(int argc, char *argv[])
{
struct processing_thread_args *thread_args;
pthread_t *threads;
struct pidfh *pidfile;
pid_t pid;
char const *config_file;
char const *error_str;
int error_line;
int i, res;
int trace_mode_enabled;
int force_single_threaded;
int do_not_daemonize;
int clear_user_cache_entries, clear_all_cache_entries;
char *user_config_entry_name, *global_config_entry_name;
int show_statistics;
int daemon_mode, interactive_mode;
/* by default all debug messages are omitted */
TRACE_OFF();
/* startup output */
print_version_info();
/* parsing command line arguments */
trace_mode_enabled = 0;
force_single_threaded = 0;
do_not_daemonize = 0;
clear_user_cache_entries = 0;
clear_all_cache_entries = 0;
show_statistics = 0;
user_config_entry_name = NULL;
global_config_entry_name = NULL;
while ((res = getopt(argc, argv, "nstdi:I:")) != -1) {
switch (res) {
case 'n':
do_not_daemonize = 1;
break;
case 's':
force_single_threaded = 1;
break;
case 't':
trace_mode_enabled = 1;
break;
case 'i':
clear_user_cache_entries = 1;
if (optarg != NULL)
if (strcmp(optarg, "all") != 0)
user_config_entry_name = strdup(optarg);
break;
case 'I':
clear_all_cache_entries = 1;
if (optarg != NULL)
if (strcmp(optarg, "all") != 0)
global_config_entry_name =
strdup(optarg);
break;
case 'd':
show_statistics = 1;
break;
case '?':
default:
usage();
/* NOT REACHED */
}
}
daemon_mode = do_not_daemonize | force_single_threaded |
trace_mode_enabled;
interactive_mode = clear_user_cache_entries | clear_all_cache_entries |
show_statistics;
if ((daemon_mode != 0) && (interactive_mode != 0)) {
LOG_ERR_1("main", "daemon mode and interactive_mode arguments "
"can't be used together");
usage();
}
if (interactive_mode != 0) {
FILE *pidfin = fopen(DEFAULT_PIDFILE_PATH, "r");
char pidbuf[256];
struct cached_connection_params connection_params;
cached_connection connection;
int result;
if (pidfin == NULL)
errx(EXIT_FAILURE, "There is no daemon running.");
memset(pidbuf, 0, sizeof(pidbuf));
fread(pidbuf, sizeof(pidbuf) - 1, 1, pidfin);
if (ferror(pidfin) != 0)
errx(EXIT_FAILURE, "Can't read from pidfile.");
fclose(pidfin);
if (sscanf(pidbuf, "%d", &pid) != 1)
errx(EXIT_FAILURE, "Invalid pidfile.");
LOG_MSG_1("main", "daemon PID is %d", pid);
memset(&connection_params, 0,
sizeof(struct cached_connection_params));
connection_params.socket_path = DEFAULT_SOCKET_PATH;
connection = open_cached_connection__(&connection_params);
if (connection == INVALID_CACHED_CONNECTION)
errx(EXIT_FAILURE, "Can't connect to the daemon.");
if (clear_user_cache_entries != 0) {
result = cached_transform__(connection,
user_config_entry_name, TT_USER);
if (result != 0)
LOG_MSG_1("main",
"user cache transformation failed");
else
LOG_MSG_1("main",
"user cache_transformation "
"succeeded");
}
if (clear_all_cache_entries != 0) {
if (geteuid() != 0)
errx(EXIT_FAILURE, "Only root can initiate "
"global cache transformation.");
result = cached_transform__(connection,
global_config_entry_name, TT_ALL);
if (result != 0)
LOG_MSG_1("main",
"global cache transformation "
"failed");
else
LOG_MSG_1("main",
"global cache transformation "
"succeeded");
}
close_cached_connection__(connection);
free(user_config_entry_name);
free(global_config_entry_name);
return (EXIT_SUCCESS);
}
pidfile = pidfile_open(DEFAULT_PIDFILE_PATH, 0644, &pid);
if (pidfile == NULL) {
if (errno == EEXIST)
errx(EXIT_FAILURE, "Daemon already running, pid: %d.",
pid);
warn("Cannot open or create pidfile");
}
if (trace_mode_enabled == 1)
TRACE_ON();
/* blocking the main thread from receiving SIGPIPE signal */
sigblock(sigmask(SIGPIPE));
/* daemonization */
if (do_not_daemonize == 0) {
res = daemon(0, trace_mode_enabled == 0 ? 0 : 1);
if (res != 0) {
LOG_ERR_1("main", "can't daemonize myself: %s",
strerror(errno));
pidfile_remove(pidfile);
goto fin;
} else
LOG_MSG_1("main", "successfully daemonized");
}
pidfile_write(pidfile);
s_agent_table = init_agent_table();
register_agent(s_agent_table, init_passwd_agent());
register_agent(s_agent_table, init_passwd_mp_agent());
register_agent(s_agent_table, init_group_agent());
register_agent(s_agent_table, init_group_mp_agent());
register_agent(s_agent_table, init_services_agent());
register_agent(s_agent_table, init_services_mp_agent());
LOG_MSG_1("main", "request agents registered successfully");
/*
* Hosts agent can't work properly until we have access to the
* appropriate dtab structures, which are used in nsdispatch
* calls
*
register_agent(s_agent_table, init_hosts_agent());
*/
/* configuration initialization */
s_configuration = init_configuration();
fill_configuration_defaults(s_configuration);
error_str = NULL;
error_line = 0;
config_file = CONFIG_PATH;
res = parse_config_file(s_configuration, config_file, &error_str,
&error_line);
if ((res != 0) && (error_str == NULL)) {
config_file = DEFAULT_CONFIG_PATH;
res = parse_config_file(s_configuration, config_file,
&error_str, &error_line);
}
if (res != 0) {
if (error_str != NULL) {
LOG_ERR_1("main", "error in configuration file(%s, %d): %s\n",
config_file, error_line, error_str);
} else {
LOG_ERR_1("main", "no configuration file found "
"- was looking for %s and %s",
CONFIG_PATH, DEFAULT_CONFIG_PATH);
}
destroy_configuration(s_configuration);
return (-1);
}
if (force_single_threaded == 1)
s_configuration->threads_num = 1;
/* cache initialization */
s_cache = init_cache_(s_configuration);
if (s_cache == NULL) {
LOG_ERR_1("main", "can't initialize the cache");
destroy_configuration(s_configuration);
return (-1);
}
/* runtime environment initialization */
s_runtime_env = init_runtime_env(s_configuration);
if (s_runtime_env == NULL) {
LOG_ERR_1("main", "can't initialize the runtime environment");
destroy_configuration(s_configuration);
destroy_cache_(s_cache);
return (-1);
}
if (s_configuration->threads_num > 1) {
threads = (pthread_t *)malloc(sizeof(pthread_t) *
s_configuration->threads_num);
memset(threads, 0, sizeof(pthread_t) *
s_configuration->threads_num);
for (i = 0; i < s_configuration->threads_num; ++i) {
thread_args = (struct processing_thread_args *)malloc(
sizeof(struct processing_thread_args));
thread_args->the_cache = s_cache;
thread_args->the_runtime_env = s_runtime_env;
thread_args->the_configuration = s_configuration;
LOG_MSG_1("main", "thread #%d was successfully created",
i);
pthread_create(&threads[i], NULL, processing_thread,
thread_args);
thread_args = NULL;
}
for (i = 0; i < s_configuration->threads_num; ++i)
pthread_join(threads[i], NULL);
} else {
LOG_MSG_1("main", "working in single-threaded mode");
processing_loop(s_cache, s_runtime_env, s_configuration);
}
fin:
/* runtime environment destruction */
destroy_runtime_env(s_runtime_env);
/* cache destruction */
destroy_cache_(s_cache);
/* configuration destruction */
destroy_configuration(s_configuration);
/* agents table destruction */
destroy_agent_table(s_agent_table);
pidfile_remove(pidfile);
return (EXIT_SUCCESS);
}

View File

@ -1,148 +0,0 @@
.\" Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
.\" All rights reserved.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd April 30, 2006
.Dt CACHED.CONF 5
.Os
.Sh NAME
.Nm cached.conf
.Nd "caching daemon configuration file"
.Sh DESCRIPTION
The
.Nm
file
is used by the
.Xr cached 8
daemon and is read on its startup.
Its syntax is mostly similar to the
.Pa nscd.conf
syntax in
.Tn Linux
and
.Tn Solaris .
It has some differences, though \[em] see them below.
.Pp
Each line specifies either an attribute and a
.Ar value ,
or an attribute, a
.Ar cachename
and a
.Ar value .
Usual cachenames are
.Dq Li passwd ,
.Dq Li groups ,
.Dq Li hosts ,
.Dq Li services ,
.Dq Li protocols
and
.Dq Li rpc .
You can also use any other
.Ar cachename
(for example, if some third-party
application uses nsswitch).
.Bl -tag -width indent
.It Va threads Op Ar value
The number of threads that listen for connections and process requests.
The minimum is 1.
The default value is 8.
.It Va enable-cache Oo Ar cachename Oc Op Cm yes | no
Enables or disables the cache for specified
.Ar cachename .
.It Va positive-time-to-live Oo Ar cachename Oc Op Ar value
Sets the TTL (time-to-live) for the specified cache in seconds.
Larger values can increase system performance, but they can also affect
cache coherence.
The default value is 3600.
.It Va positive-policy Oo Ar cachename Oc Op Cm fifo | lru | lfu
The policy that is applied to erase some of the cache elements when the
size limit of the given
.Ar cachename
is exceeded.
Possible policies are:
.Cm fifo
(first-in-first-out),
.Cm lru
(least-recently-used), and
.Cm lfu
(least-frequently-used).
The default policy is
.Cm lru .
.It Va negative-time-to-live Oo Ar cachename Oc Op Ar value
The TTL of the negative cached elements in seconds.
Larger values can significantly increase system performance in some
environments (for example, when dealing with files owned by UIDs that are
not in the system databases).
This value should be kept low to avoid cache coherence problems.
The default value is 60.
.It Va negative-policy Oo Ar cachename Oc Op Cm fifo | lru | lfu
The same as
.Va positive-policy ,
but applied to the negative elements of the given
.Ar cachename .
The default policy is
.Cm fifo .
.It Va suggested-size Oo Ar cachename Oc Op Ar value
This is the internal hash table size.
The value should be a prime number for optimum performance.
You should only change this value when the number of cached elements is
significantly (5-10 times) greater than the default hash table size (255).
.It Va keep-hot-count Oo Ar cachename Oc Op Ar value
The size limit of the cache with the given
.Ar cachename .
When it is exceeded, the policy will be applied.
The default value is 2048.
.It Va perform-actual-lookups Oo Ar cachename Oc Op Cm yes | no
If enabled, the
.Xr cached 8
does not simply receive and cache NSS request results, but performs
all lookups by itself and only returns the responses.
If this feature is enabled, then for the given
.Ar cachename
.Xr cached 8
will act similarly to the NSCD.
.Pp
.Sy NOTE :
this feature is currently experimental \[em] it supports only
.Dq Li passwd ,
.Dq Li groups
and
.Dq Li services
cachenames.
.El
.Sh NOTES
You can use the
.Ql #
symbol at the beginning of the line for comments.
.Sh FILES
.Bl -tag -width ".Pa /etc/cached.conf" -compact
.It Pa /etc/cached.conf
.El
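.Sh EXAMPLES
The following fragment is only an illustration of the syntax described
above; the cache names and values are examples, not recommended settings:
.Bd -literal -offset indent
# enable caching for passwd and hosts lookups
enable-cache passwd yes
enable-cache hosts yes

# keep positive passwd entries for 10 minutes, negative ones for 30 seconds
positive-time-to-live passwd 600
negative-time-to-live passwd 30

# evict least-recently-used passwd entries when the limit is exceeded
positive-policy passwd lru
keep-hot-count passwd 1024

# number of worker threads
threads 4
.Ed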
.Sh SEE ALSO
.Xr cached 8
.Sh AUTHORS
.An Michael Bushkov
.Aq bushman@rsu.ru
.Sh BUGS
Please send bug reports and suggestions to
.Aq bushman@rsu.ru .

View File

@ -1,284 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/event.h>
#include <sys/uio.h>
#include <sys/un.h>
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "debug.h"
#include "cachedcli.h"
#include "protocol.h"
#define DEFAULT_CACHED_IO_TIMEOUT 4
static int safe_write(struct cached_connection_ *, const void *, size_t);
static int safe_read(struct cached_connection_ *, void *, size_t);
static int send_credentials(struct cached_connection_ *, int);
static int
safe_write(struct cached_connection_ *connection, const void *data,
size_t data_size)
{
struct kevent eventlist;
int nevents;
size_t result;
ssize_t s_result;
struct timespec timeout;
if (data_size == 0)
return (0);
timeout.tv_sec = DEFAULT_CACHED_IO_TIMEOUT;
timeout.tv_nsec = 0;
result = 0;
do {
nevents = kevent(connection->write_queue, NULL, 0, &eventlist,
1, &timeout);
if ((nevents == 1) && (eventlist.filter == EVFILT_WRITE)) {
s_result = write(connection->sockfd, data + result,
eventlist.data < data_size - result ?
eventlist.data : data_size - result);
if (s_result == -1)
return (-1);
else
result += s_result;
if (eventlist.flags & EV_EOF)
return (result < data_size ? -1 : 0);
} else
return (-1);
} while (result < data_size);
return (0);
}
static int
safe_read(struct cached_connection_ *connection, void *data, size_t data_size)
{
struct kevent eventlist;
size_t result;
ssize_t s_result;
struct timespec timeout;
int nevents;
if (data_size == 0)
return (0);
timeout.tv_sec = DEFAULT_CACHED_IO_TIMEOUT;
timeout.tv_nsec = 0;
result = 0;
do {
nevents = kevent(connection->read_queue, NULL, 0, &eventlist, 1,
&timeout);
if ((nevents == 1) && (eventlist.filter == EVFILT_READ)) {
s_result = read(connection->sockfd, data + result,
eventlist.data <= data_size - result ? eventlist.data :
data_size - result);
if (s_result == -1)
return (-1);
else
result += s_result;
if (eventlist.flags & EV_EOF)
return (result < data_size ? -1 : 0);
} else
return (-1);
} while (result < data_size);
return (0);
}
static int
send_credentials(struct cached_connection_ *connection, int type)
{
struct kevent eventlist;
int nevents;
ssize_t result;
int res;
struct msghdr cred_hdr;
struct iovec iov;
struct {
struct cmsghdr hdr;
struct cmsgcred creds;
} cmsg;
TRACE_IN(send_credentials);
memset(&cmsg, 0, sizeof(cmsg));
cmsg.hdr.cmsg_len = sizeof(cmsg);
cmsg.hdr.cmsg_level = SOL_SOCKET;
cmsg.hdr.cmsg_type = SCM_CREDS;
memset(&cred_hdr, 0, sizeof(struct msghdr));
cred_hdr.msg_iov = &iov;
cred_hdr.msg_iovlen = 1;
cred_hdr.msg_control = &cmsg;
cred_hdr.msg_controllen = sizeof(cmsg);
iov.iov_base = &type;
iov.iov_len = sizeof(int);
EV_SET(&eventlist, connection->sockfd, EVFILT_WRITE, EV_ADD,
NOTE_LOWAT, sizeof(int), NULL);
res = kevent(connection->write_queue, &eventlist, 1, NULL, 0, NULL);
nevents = kevent(connection->write_queue, NULL, 0, &eventlist, 1, NULL);
if ((nevents == 1) && (eventlist.filter == EVFILT_WRITE)) {
result = (sendmsg(connection->sockfd, &cred_hdr, 0) == -1) ? -1
: 0;
EV_SET(&eventlist, connection->sockfd, EVFILT_WRITE, EV_ADD,
0, 0, NULL);
kevent(connection->write_queue, &eventlist, 1, NULL, 0, NULL);
TRACE_OUT(send_credentials);
return (result);
} else {
TRACE_OUT(send_credentials);
return (-1);
}
}
struct cached_connection_ *
open_cached_connection__(struct cached_connection_params const *params)
{
struct cached_connection_ *retval;
struct kevent eventlist;
struct sockaddr_un client_address;
int client_address_len, client_socket;
int res;
TRACE_IN(open_cached_connection);
assert(params != NULL);
client_socket = socket(PF_LOCAL, SOCK_STREAM, 0);
client_address.sun_family = PF_LOCAL;
strncpy(client_address.sun_path, params->socket_path,
sizeof(client_address.sun_path));
client_address_len = sizeof(client_address.sun_family) +
strlen(client_address.sun_path) + 1;
res = connect(client_socket, (struct sockaddr *)&client_address,
client_address_len);
if (res == -1) {
close(client_socket);
TRACE_OUT(open_cached_connection);
return (NULL);
}
fcntl(client_socket, F_SETFL, O_NONBLOCK);
retval = malloc(sizeof(struct cached_connection_));
assert(retval != NULL);
memset(retval, 0, sizeof(struct cached_connection_));
retval->sockfd = client_socket;
retval->write_queue = kqueue();
assert(retval->write_queue != -1);
EV_SET(&eventlist, retval->sockfd, EVFILT_WRITE, EV_ADD,
0, 0, NULL);
res = kevent(retval->write_queue, &eventlist, 1, NULL, 0, NULL);
retval->read_queue = kqueue();
assert(retval->read_queue != -1);
EV_SET(&eventlist, retval->sockfd, EVFILT_READ, EV_ADD,
0, 0, NULL);
res = kevent(retval->read_queue, &eventlist, 1, NULL, 0, NULL);
TRACE_OUT(open_cached_connection);
return (retval);
}
void
close_cached_connection__(struct cached_connection_ *connection)
{
TRACE_IN(close_cached_connection);
assert(connection != NULL);
close(connection->sockfd);
close(connection->read_queue);
close(connection->write_queue);
free(connection);
TRACE_OUT(close_cached_connection);
}
int
cached_transform__(struct cached_connection_ *connection,
const char *entry_name, int transformation_type)
{
size_t name_size;
int error_code;
int result;
TRACE_IN(cached_transform);
error_code = -1;
result = 0;
result = send_credentials(connection, CET_TRANSFORM_REQUEST);
if (result != 0)
goto fin;
if (entry_name != NULL)
name_size = strlen(entry_name);
else
name_size = 0;
result = safe_write(connection, &name_size, sizeof(size_t));
if (result != 0)
goto fin;
result = safe_write(connection, &transformation_type, sizeof(int));
if (result != 0)
goto fin;
if (entry_name != NULL) {
result = safe_write(connection, entry_name, name_size);
if (result != 0)
goto fin;
}
result = safe_read(connection, &error_code, sizeof(int));
if (result != 0)
error_code = -1;
fin:
TRACE_OUT(cached_transform);
return (error_code);
}

View File

@ -1,57 +0,0 @@
/*-
* Copyright (c) 2004 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_CACHEDCLI_H__
#define __CACHED_CACHEDCLI_H__
struct cached_connection_params {
char *socket_path;
struct timeval timeout;
};
struct cached_connection_ {
int sockfd;
int read_queue;
int write_queue;
};
/* simple abstractions so that we do not have to write "struct" every time */
typedef struct cached_connection_ *cached_connection;
typedef struct cached_connection_ *cached_mp_write_session;
typedef struct cached_connection_ *cached_mp_read_session;
#define INVALID_CACHED_CONNECTION (NULL)
/* initialization/destruction routines */
extern cached_connection open_cached_connection__(
struct cached_connection_params const *);
extern void close_cached_connection__(cached_connection);
extern int cached_transform__(cached_connection, const char *, int);
#endif
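For reference, a stripped-down sketch of how these declarations are used by an administration client, mirroring the interactive branch of main() above. The DEFAULT_SOCKET_PATH macro and the TT_ALL transformation constant are assumed to be provided by the daemon's other headers (protocol.h), as in cached.c; they are not defined in this header:

#include <err.h>
#include <stdlib.h>
#include <string.h>

#include "cachedcli.h"
#include "protocol.h"

int
main(void)
{
	struct cached_connection_params params;
	cached_connection conn;

	memset(&params, 0, sizeof(params));
	params.socket_path = DEFAULT_SOCKET_PATH;	/* assumed from protocol.h */
	conn = open_cached_connection__(&params);
	if (conn == INVALID_CACHED_CONNECTION)
		errx(EXIT_FAILURE, "can't connect to the daemon");
	/* a NULL entry name with TT_ALL asks for a global transformation */
	if (cached_transform__(conn, NULL, TT_ALL) != 0)
		warnx("global cache transformation failed");
	close_cached_connection__(conn);
	return (EXIT_SUCCESS);
}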

File diff suppressed because it is too large

View File

@ -1,281 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_CACHELIB_H__
#define __CACHED_CACHELIB_H__
#include <sys/queue.h>
#include <sys/time.h>
#include <stdlib.h>
#include "hashtable.h"
#include "cacheplcs.h"
enum cache_entry_t {
CET_COMMON = 0, /* cache item is atomic */
CET_MULTIPART /* cache item is formed part by part */
};
enum cache_transformation_t {
CTT_FLUSH = 0, /* flush the cache - delete all obsolete items */
CTT_CLEAR = 1 /* delete all items in the cache */
};
/* cache deletion policy type enum */
enum cache_policy_t {
CPT_FIFO = 0, /* first-in first-out */
CPT_LRU = 1, /* least recently used */
CPT_LFU = 2 /* least frequently used */
};
/* multipart sessions can be used for reading and writing */
enum cache_mp_session_t {
CMPT_READ_SESSION,
CMPT_WRITE_SESSION
};
/*
* When doing partial transformations of entries (which are applied to
* elements whose keys contain the specified buffer in their left or
* right part), this enum indicates the required position of the key part.
*/
enum part_position_t {
KPPT_LEFT,
KPPT_RIGHT
};
/* The num_levels attribute is probably obsolete - the user can always
* emulate it by using one entry.
* get_time_func is needed to have a clock-independent counter.
*/
struct cache_params
{
void (*get_time_func)(struct timeval *);
};
/*
* base structure - normal_cache_entry_params and multipart_cache_entry_params
* are "inherited" from it
*/
struct cache_entry_params
{
enum cache_entry_t entry_type;
char *entry_name;
};
/* params, used for most entries */
struct common_cache_entry_params
{
/* inherited fields */
enum cache_entry_t entry_type;
/* unique fields */
char *entry_name;
size_t cache_entries_size;
size_t max_elemsize; /* if 0 then no check is made */
size_t satisf_elemsize; /* if entry size is exceeded,
* this number of elements will be left,
* others will be deleted */
struct timeval max_lifetime; /* if 0 then no check is made */
enum cache_policy_t policy; /* policy used for transformations */
};
/* params, used for multipart entries */
struct mp_cache_entry_params
{
/* inherited fields */
enum cache_entry_t entry_type;
char *entry_name;
/* unique fields */
size_t max_elemsize; /* if 0 then no check is made */
size_t max_sessions; /* maximum number of active sessions */
struct timeval max_lifetime; /* maximum elements lifetime */
};
struct cache_ht_item_data_
{
/* the key is a plain byte sequence, not a null-terminated string */
char *key;
size_t key_size;
char *value;
size_t value_size;
struct cache_policy_item_ *fifo_policy_item;
};
struct cache_ht_item_
{
HASHTABLE_ENTRY_HEAD(ht_item_, struct cache_ht_item_data_) data;
};
struct cache_entry_
{
char *name;
struct cache_entry_params *params;
};
struct cache_common_entry_
{
char *name;
struct cache_entry_params *params;
struct common_cache_entry_params common_params;
HASHTABLE_HEAD(cache_ht_, cache_ht_item_) items;
size_t items_size;
/*
* An entry always has the FIFO policy, which is used to eliminate old
* elements (the ones whose lifetime exceeds max_lifetime). Besides that,
* the user can specify another policy to be applied when there are too
* many elements in the entry. So policies_size can be 1 or 2.
*/
struct cache_policy_ **policies;
size_t policies_size;
void (*get_time_func)(struct timeval *);
};
struct cache_mp_data_item_ {
char *value;
size_t value_size;
TAILQ_ENTRY(cache_mp_data_item_) entries;
};
struct cache_mp_write_session_
{
struct cache_mp_entry_ *parent_entry;
/*
* All items are accumulated in this queue. When the session is
* committed, they all will be copied to the multipart entry.
*/
TAILQ_HEAD(cache_mp_data_item_head, cache_mp_data_item_) items;
size_t items_size;
TAILQ_ENTRY(cache_mp_write_session_) entries;
};
struct cache_mp_read_session_
{
struct cache_mp_entry_ *parent_entry;
struct cache_mp_data_item_ *current_item;
TAILQ_ENTRY(cache_mp_read_session_) entries;
};
struct cache_mp_entry_
{
char *name;
struct cache_entry_params *params;
struct mp_cache_entry_params mp_params;
/* All opened write sessions */
TAILQ_HEAD(write_sessions_head, cache_mp_write_session_) ws_head;
size_t ws_size;
/* All opened read sessions */
TAILQ_HEAD(read_sessions_head, cache_mp_read_session_) rs_head;
size_t rs_size;
/*
* completed_write_session is the committed write session. All read
* sessions use data from it. If the completed_write_session is out of
* date, but still in use by some of the read sessions, the newly
* committed write session is stored in pending_write_session.
* In such a case, completed_write_session will be replaced with
* pending_write_session as soon as it is no longer used by any of
* the read sessions.
*/
struct cache_mp_write_session_ *completed_write_session;
struct cache_mp_write_session_ *pending_write_session;
struct timeval creation_time;
struct timeval last_request_time;
void (*get_time_func)(struct timeval *);
};
struct cache_
{
struct cache_params params;
struct cache_entry_ **entries;
size_t entries_capacity;
size_t entries_size;
};
/* simple abstractions - so that we do not have to write "struct" every time */
typedef struct cache_ *cache;
typedef struct cache_entry_ *cache_entry;
typedef struct cache_mp_write_session_ *cache_mp_write_session;
typedef struct cache_mp_read_session_ *cache_mp_read_session;
#define INVALID_CACHE (NULL)
#define INVALID_CACHE_ENTRY (NULL)
#define INVALID_CACHE_MP_WRITE_SESSION (NULL)
#define INVALID_CACHE_MP_READ_SESSION (NULL)
/*
* NOTE: all cache operations are thread-unsafe. Thread-safety must be
* ensured externally by the caller.
*/
/* cache initialization/destruction routines */
extern cache init_cache(struct cache_params const *);
extern void destroy_cache(cache);
/* cache entries manipulation routines */
extern int register_cache_entry(cache, struct cache_entry_params const *);
extern int unregister_cache_entry(cache, const char *);
extern cache_entry find_cache_entry(cache, const char *);
/* read/write operations used on common entries */
extern int cache_read(cache_entry, const char *, size_t, char *, size_t *);
extern int cache_write(cache_entry, const char *, size_t, char const *, size_t);
/* read/write operations used on multipart entries */
extern cache_mp_write_session open_cache_mp_write_session(cache_entry);
extern int cache_mp_write(cache_mp_write_session, char *, size_t);
extern void abandon_cache_mp_write_session(cache_mp_write_session);
extern void close_cache_mp_write_session(cache_mp_write_session);
extern cache_mp_read_session open_cache_mp_read_session(cache_entry);
extern int cache_mp_read(cache_mp_read_session, char *, size_t *);
extern void close_cache_mp_read_session(cache_mp_read_session);
/* transformation routines */
extern int transform_cache_entry(cache_entry, enum cache_transformation_t);
extern int transform_cache_entry_part(cache_entry, enum cache_transformation_t,
const char *, size_t, enum part_position_t);
#endif
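The header above only declares the cache API; the following hypothetical sketch shows the intended call sequence for a common (atomic) entry. It assumes the program is linked against cachelib.c from this directory and that a zero return from cache_read() indicates a hit (the implementation diff is suppressed above, so that return convention is an assumption); the entry name and sizes are arbitrary:

#include <sys/time.h>

#include <assert.h>
#include <stdio.h>
#include <string.h>

#include "cachelib.h"

/* clock callback required by struct cache_params */
static void
get_time(struct timeval *tv)
{
	gettimeofday(tv, NULL);
}

int
main(void)
{
	struct cache_params params;
	struct common_cache_entry_params entry_params;
	char entry_name[] = "example";
	cache the_cache;
	cache_entry entry;
	char buf[32];
	size_t buf_size;

	memset(&params, 0, sizeof(params));
	params.get_time_func = get_time;
	the_cache = init_cache(&params);
	assert(the_cache != INVALID_CACHE);

	/* describe and register one common (atomic) entry */
	memset(&entry_params, 0, sizeof(entry_params));
	entry_params.entry_type = CET_COMMON;
	entry_params.entry_name = entry_name;
	entry_params.cache_entries_size = 257;	/* hash table size */
	entry_params.max_elemsize = 2048;	/* eviction threshold */
	entry_params.satisf_elemsize = 1024;	/* elements kept after eviction */
	entry_params.max_lifetime.tv_sec = 3600;
	entry_params.policy = CPT_LRU;
	register_cache_entry(the_cache,
	    (struct cache_entry_params *)&entry_params);

	entry = find_cache_entry(the_cache, entry_name);
	assert(entry != INVALID_CACHE_ENTRY);

	/* keys and values are plain byte sequences with explicit sizes */
	cache_write(entry, "key", 4, "value", 6);
	buf_size = sizeof(buf);
	if (cache_read(entry, "key", 4, buf, &buf_size) == 0)
		printf("cached value: %s\n", buf);

	destroy_cache(the_cache);
	return (0);
}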

View File

@ -1,590 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <assert.h>
#include <string.h>
#include "cacheplcs.h"
#include "debug.h"
static void cache_fifo_policy_update_item(struct cache_policy_ *,
struct cache_policy_item_ *);
static void cache_lfu_policy_add_item(struct cache_policy_ *,
struct cache_policy_item_ *);
static struct cache_policy_item_ * cache_lfu_policy_create_item(void);
static void cache_lfu_policy_destroy_item(struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_first_item(
struct cache_policy_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_last_item(
struct cache_policy_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_next_item(
struct cache_policy_ *, struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_lfu_policy_get_prev_item(
struct cache_policy_ *, struct cache_policy_item_ *);
static void cache_lfu_policy_remove_item(struct cache_policy_ *,
struct cache_policy_item_ *);
static void cache_lfu_policy_update_item(struct cache_policy_ *,
struct cache_policy_item_ *);
static void cache_lru_policy_update_item(struct cache_policy_ *,
struct cache_policy_item_ *);
static void cache_queue_policy_add_item(struct cache_policy_ *,
struct cache_policy_item_ *);
static struct cache_policy_item_ * cache_queue_policy_create_item();
static void cache_queue_policy_destroy_item(struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_queue_policy_get_first_item(
struct cache_policy_ *);
static struct cache_policy_item_ *cache_queue_policy_get_last_item(
struct cache_policy_ *);
static struct cache_policy_item_ *cache_queue_policy_get_next_item(
struct cache_policy_ *, struct cache_policy_item_ *);
static struct cache_policy_item_ *cache_queue_policy_get_prev_item(
struct cache_policy_ *, struct cache_policy_item_ *);
static void cache_queue_policy_remove_item(struct cache_policy_ *,
struct cache_policy_item_ *);
static void destroy_cache_queue_policy(struct cache_queue_policy_ *);
static struct cache_queue_policy_ *init_cache_queue_policy(void);
/*
* All cache_queue_policy_XXX functions below are used to fill
* the cache_queue_policy structure. They implement most of the functionality
* of the LRU and FIFO policies. The LRU and FIFO policies are actually
* cache_queue_policy_ with a different update_item function.
*/
static struct cache_policy_item_ *
cache_queue_policy_create_item()
{
struct cache_queue_policy_item_ *retval;
TRACE_IN(cache_queue_policy_create_item);
retval = (struct cache_queue_policy_item_ *)malloc(
sizeof(struct cache_queue_policy_item_));
assert(retval != NULL);
memset(retval, 0, sizeof(struct cache_queue_policy_item_));
TRACE_OUT(cache_queue_policy_create_item);
return ((struct cache_policy_item_ *)retval);
}
static void
cache_queue_policy_destroy_item(struct cache_policy_item_ *item)
{
TRACE_IN(cache_queue_policy_destroy_item);
assert(item != NULL);
free(item);
TRACE_OUT(cache_queue_policy_destroy_item);
}
static void
cache_queue_policy_add_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_queue_policy_ *queue_policy;
struct cache_queue_policy_item_ *queue_item;
TRACE_IN(cache_queue_policy_add_item);
queue_policy = (struct cache_queue_policy_ *)policy;
queue_item = (struct cache_queue_policy_item_ *)item;
TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
TRACE_OUT(cache_queue_policy_add_item);
}
static void
cache_queue_policy_remove_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_queue_policy_ *queue_policy;
struct cache_queue_policy_item_ *queue_item;
TRACE_IN(cache_queue_policy_remove_item);
queue_policy = (struct cache_queue_policy_ *)policy;
queue_item = (struct cache_queue_policy_item_ *)item;
TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
TRACE_OUT(cache_queue_policy_remove_item);
}
static struct cache_policy_item_ *
cache_queue_policy_get_first_item(struct cache_policy_ *policy)
{
struct cache_queue_policy_ *queue_policy;
TRACE_IN(cache_queue_policy_get_first_item);
queue_policy = (struct cache_queue_policy_ *)policy;
TRACE_OUT(cache_queue_policy_get_first_item);
return ((struct cache_policy_item_ *)TAILQ_FIRST(&queue_policy->head));
}
static struct cache_policy_item_ *
cache_queue_policy_get_last_item(struct cache_policy_ *policy)
{
struct cache_queue_policy_ *queue_policy;
TRACE_IN(cache_queue_policy_get_last_item);
queue_policy = (struct cache_queue_policy_ *)policy;
TRACE_OUT(cache_queue_policy_get_last_item);
return ((struct cache_policy_item_ *)TAILQ_LAST(&queue_policy->head,
cache_queue_policy_head_));
}
static struct cache_policy_item_ *
cache_queue_policy_get_next_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_queue_policy_ *queue_policy;
struct cache_queue_policy_item_ *queue_item;
TRACE_IN(cache_queue_policy_get_next_item);
queue_policy = (struct cache_queue_policy_ *)policy;
queue_item = (struct cache_queue_policy_item_ *)item;
TRACE_OUT(cache_queue_policy_get_next_item);
return ((struct cache_policy_item_ *)TAILQ_NEXT(queue_item, entries));
}
static struct cache_policy_item_ *
cache_queue_policy_get_prev_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_queue_policy_ *queue_policy;
struct cache_queue_policy_item_ *queue_item;
TRACE_IN(cache_queue_policy_get_prev_item);
queue_policy = (struct cache_queue_policy_ *)policy;
queue_item = (struct cache_queue_policy_item_ *)item;
TRACE_OUT(cache_queue_policy_get_prev_item);
return ((struct cache_policy_item_ *)TAILQ_PREV(queue_item,
cache_queue_policy_head_, entries));
}
/*
* Initializes cache_queue_policy_ by filling the structure with the function
* pointers defined above
*/
static struct cache_queue_policy_ *
init_cache_queue_policy(void)
{
struct cache_queue_policy_ *retval;
TRACE_IN(init_cache_queue_policy);
retval = (struct cache_queue_policy_ *)malloc(
sizeof(struct cache_queue_policy_));
assert(retval != NULL);
memset(retval, 0, sizeof(struct cache_queue_policy_));
retval->parent_data.create_item_func = cache_queue_policy_create_item;
retval->parent_data.destroy_item_func = cache_queue_policy_destroy_item;
retval->parent_data.add_item_func = cache_queue_policy_add_item;
retval->parent_data.remove_item_func = cache_queue_policy_remove_item;
retval->parent_data.get_first_item_func =
cache_queue_policy_get_first_item;
retval->parent_data.get_last_item_func =
cache_queue_policy_get_last_item;
retval->parent_data.get_next_item_func =
cache_queue_policy_get_next_item;
retval->parent_data.get_prev_item_func =
cache_queue_policy_get_prev_item;
TAILQ_INIT(&retval->head);
TRACE_OUT(init_cache_queue_policy);
return (retval);
}
static void
destroy_cache_queue_policy(struct cache_queue_policy_ *queue_policy)
{
struct cache_queue_policy_item_ *queue_item;
TRACE_IN(destroy_cache_queue_policy);
while (!TAILQ_EMPTY(&queue_policy->head)) {
queue_item = TAILQ_FIRST(&queue_policy->head);
TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
cache_queue_policy_destroy_item(
(struct cache_policy_item_ *)queue_item);
}
free(queue_policy);
TRACE_OUT(destroy_cache_queue_policy);
}
/*
* Makes cache_queue_policy_ behave like the FIFO policy - we don't do
* anything when the cache element is updated, so it always stays in its
* initial position in the queue - that is exactly the FIFO functionality.
*/
static void
cache_fifo_policy_update_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
TRACE_IN(cache_fifo_policy_update_item);
/* policy and item arguments are ignored */
TRACE_OUT(cache_fifo_policy_update_item);
}
struct cache_policy_ *
init_cache_fifo_policy()
{
struct cache_queue_policy_ *retval;
TRACE_IN(init_cache_fifo_policy);
retval = init_cache_queue_policy();
retval->parent_data.update_item_func = cache_fifo_policy_update_item;
TRACE_OUT(init_cache_fifo_policy);
return ((struct cache_policy_ *)retval);
}
void
destroy_cache_fifo_policy(struct cache_policy_ *policy)
{
struct cache_queue_policy_ *queue_policy;
TRACE_IN(destroy_cache_fifo_policy);
queue_policy = (struct cache_queue_policy_ *)policy;
destroy_cache_queue_policy(queue_policy);
TRACE_OUT(destroy_cache_fifo_policy);
}
/*
* Makes cache_queue_policy_ behave like the LRU policy. On each update, the
* cache element is moved to the end of the queue, so it will be the last one
* to be deleted. That is exactly the LRU policy functionality.
*/
static void
cache_lru_policy_update_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_queue_policy_ *queue_policy;
struct cache_queue_policy_item_ *queue_item;
TRACE_IN(cache_lru_policy_update_item);
queue_policy = (struct cache_queue_policy_ *)policy;
queue_item = (struct cache_queue_policy_item_ *)item;
TAILQ_REMOVE(&queue_policy->head, queue_item, entries);
TAILQ_INSERT_TAIL(&queue_policy->head, queue_item, entries);
TRACE_OUT(cache_lru_policy_update_item);
}
struct cache_policy_ *
init_cache_lru_policy()
{
struct cache_queue_policy_ *retval;
TRACE_IN(init_cache_lru_policy);
retval = init_cache_queue_policy();
retval->parent_data.update_item_func = cache_lru_policy_update_item;
TRACE_OUT(init_cache_lru_policy);
return ((struct cache_policy_ *)retval);
}
void
destroy_cache_lru_policy(struct cache_policy_ *policy)
{
struct cache_queue_policy_ *queue_policy;
TRACE_IN(destroy_cache_lru_policy);
queue_policy = (struct cache_queue_policy_ *)policy;
destroy_cache_queue_policy(queue_policy);
TRACE_OUT(destroy_cache_lru_policy);
}
/*
* The LFU (least frequently used) policy implementation differs greatly from
* the LRU and FIFO ones (both based on cache_queue_policy_). Almost all
* cache_policy_ functions are implemented specifically for this policy.
* The idea of this policy is to represent the frequency (a real number) as
* an integer and use it as an index into an array. Each array element is a
* list of items. For example, with a 100-element array, the elements with a
* frequency of 0.1 (calls per second) would be kept in the 10th element of
* the array.
*/
static struct cache_policy_item_ *
cache_lfu_policy_create_item(void)
{
struct cache_lfu_policy_item_ *retval;
TRACE_IN(cache_lfu_policy_create_item);
retval = (struct cache_lfu_policy_item_ *)malloc(
sizeof(struct cache_lfu_policy_item_));
assert(retval != NULL);
memset(retval, 0, sizeof(struct cache_lfu_policy_item_));
TRACE_OUT(cache_lfu_policy_create_item);
return ((struct cache_policy_item_ *)retval);
}
static void
cache_lfu_policy_destroy_item(struct cache_policy_item_ *item)
{
TRACE_IN(cache_lfu_policy_destroy_item);
assert(item != NULL);
free(item);
TRACE_OUT(cache_lfu_policy_destroy_item);
}
/*
* When placed in the LFU policy queue for the first time, the maximum
* frequency is assigned to the element
*/
static void
cache_lfu_policy_add_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
TRACE_IN(cache_lfu_policy_add_item);
lfu_policy = (struct cache_lfu_policy_ *)policy;
lfu_item = (struct cache_lfu_policy_item_ *)item;
lfu_item->frequency = CACHELIB_MAX_FREQUENCY - 1;
TAILQ_INSERT_HEAD(&(lfu_policy->groups[CACHELIB_MAX_FREQUENCY - 1]),
lfu_item, entries);
TRACE_OUT(cache_lfu_policy_add_item);
}
/*
* On each update the frequency of the element is recalculated and, if it
* has changed, the element is moved to another place in the array.
*/
static void
cache_lfu_policy_update_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
int index;
TRACE_IN(cache_lfu_policy_update_item);
lfu_policy = (struct cache_lfu_policy_ *)policy;
lfu_item = (struct cache_lfu_policy_item_ *)item;
/*
* We calculate the square of the request_count to avoid grouping of
* all elements at the start of the array (for example, if the array size is
* 100 and most of its elements have a frequency below 0.01, they
* would all be grouped in the array's first position). Other
* techniques should be used here later to ensure that elements are
* distributed evenly across the array and not grouped at its beginning.
*/
if (lfu_item->parent_data.last_request_time.tv_sec !=
lfu_item->parent_data.creation_time.tv_sec) {
index = ((double)lfu_item->parent_data.request_count *
(double)lfu_item->parent_data.request_count /
(lfu_item->parent_data.last_request_time.tv_sec -
lfu_item->parent_data.creation_time.tv_sec + 1)) *
CACHELIB_MAX_FREQUENCY;
if (index >= CACHELIB_MAX_FREQUENCY)
index = CACHELIB_MAX_FREQUENCY - 1;
} else
index = CACHELIB_MAX_FREQUENCY - 1;
TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
entries);
lfu_item->frequency = index;
TAILQ_INSERT_HEAD(&(lfu_policy->groups[index]), lfu_item, entries);
TRACE_OUT(cache_lfu_policy_update_item);
}
static void
cache_lfu_policy_remove_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
TRACE_IN(cache_lfu_policy_remove_item);
lfu_policy = (struct cache_lfu_policy_ *)policy;
lfu_item = (struct cache_lfu_policy_item_ *)item;
TAILQ_REMOVE(&(lfu_policy->groups[lfu_item->frequency]), lfu_item,
entries);
TRACE_OUT(cache_lfu_policy_remove_item);
}
static struct cache_policy_item_ *
cache_lfu_policy_get_first_item(struct cache_policy_ *policy)
{
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
int i;
TRACE_IN(cache_lfu_policy_get_first_item);
lfu_item = NULL;
lfu_policy = (struct cache_lfu_policy_ *)policy;
for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
break;
}
TRACE_OUT(cache_lfu_policy_get_first_item);
return ((struct cache_policy_item_ *)lfu_item);
}
static struct cache_policy_item_ *
cache_lfu_policy_get_last_item(struct cache_policy_ *policy)
{
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
int i;
TRACE_IN(cache_lfu_policy_get_last_item);
lfu_item = NULL;
lfu_policy = (struct cache_lfu_policy_ *)policy;
for (i = CACHELIB_MAX_FREQUENCY - 1; i >= 0; --i)
if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
cache_lfu_policy_group_);
break;
}
TRACE_OUT(cache_lfu_policy_get_last_item);
return ((struct cache_policy_item_ *)lfu_item);
}
static struct cache_policy_item_ *
cache_lfu_policy_get_next_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
int i;
TRACE_IN(cache_lfu_policy_get_next_item);
lfu_policy = (struct cache_lfu_policy_ *)policy;
lfu_item = TAILQ_NEXT((struct cache_lfu_policy_item_ *)item, entries);
if (lfu_item == NULL)
{
for (i = ((struct cache_lfu_policy_item_ *)item)->frequency + 1;
i < CACHELIB_MAX_FREQUENCY; ++i) {
if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
break;
}
}
}
TRACE_OUT(cache_lfu_policy_get_next_item);
return ((struct cache_policy_item_ *)lfu_item);
}
static struct cache_policy_item_ *
cache_lfu_policy_get_prev_item(struct cache_policy_ *policy,
struct cache_policy_item_ *item)
{
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
int i;
TRACE_IN(cache_lfu_policy_get_prev_item);
lfu_policy = (struct cache_lfu_policy_ *)policy;
lfu_item = TAILQ_PREV((struct cache_lfu_policy_item_ *)item,
cache_lfu_policy_group_, entries);
if (lfu_item == NULL)
{
for (i = ((struct cache_lfu_policy_item_ *)item)->frequency - 1;
i >= 0; --i)
if (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
lfu_item = TAILQ_LAST(&(lfu_policy->groups[i]),
cache_lfu_policy_group_);
break;
}
}
TRACE_OUT(cache_lfu_policy_get_prev_item);
return ((struct cache_policy_item_ *)lfu_item);
}
/*
* Initializes the cache_policy_ structure by filling it with appropriate
* function pointers
*/
struct cache_policy_ *
init_cache_lfu_policy()
{
int i;
struct cache_lfu_policy_ *retval;
TRACE_IN(init_cache_lfu_policy);
retval = (struct cache_lfu_policy_ *)malloc(
sizeof(struct cache_lfu_policy_));
assert(retval != NULL);
memset(retval, 0, sizeof(struct cache_lfu_policy_));
retval->parent_data.create_item_func = cache_lfu_policy_create_item;
retval->parent_data.destroy_item_func = cache_lfu_policy_destroy_item;
retval->parent_data.add_item_func = cache_lfu_policy_add_item;
retval->parent_data.update_item_func = cache_lfu_policy_update_item;
retval->parent_data.remove_item_func = cache_lfu_policy_remove_item;
retval->parent_data.get_first_item_func =
cache_lfu_policy_get_first_item;
retval->parent_data.get_last_item_func =
cache_lfu_policy_get_last_item;
retval->parent_data.get_next_item_func =
cache_lfu_policy_get_next_item;
retval->parent_data.get_prev_item_func =
cache_lfu_policy_get_prev_item;
for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i)
TAILQ_INIT(&(retval->groups[i]));
TRACE_OUT(init_cache_lfu_policy);
return ((struct cache_policy_ *)retval);
}
void
destroy_cache_lfu_policy(struct cache_policy_ *policy)
{
int i;
struct cache_lfu_policy_ *lfu_policy;
struct cache_lfu_policy_item_ *lfu_item;
TRACE_IN(destroy_cache_lfu_policy);
lfu_policy = (struct cache_lfu_policy_ *)policy;
for (i = 0; i < CACHELIB_MAX_FREQUENCY; ++i) {
while (!TAILQ_EMPTY(&(lfu_policy->groups[i]))) {
lfu_item = TAILQ_FIRST(&(lfu_policy->groups[i]));
TAILQ_REMOVE(&(lfu_policy->groups[i]), lfu_item,
entries);
cache_lfu_policy_destroy_item(
(struct cache_policy_item_ *)lfu_item);
}
}
free(policy);
TRACE_OUT(destroy_cache_lfu_policy);
}

View File

@ -1,137 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_CACHEPLCS_H__
#define __CACHED_CACHEPLCS_H__
#include <sys/queue.h>
#include <sys/time.h>
#include <stdlib.h>
/* common policy definitions */
#define CACHELIB_MAX_FREQUENCY 100
/*
* cache_policy_item_ represents some abstract cache element in the policy
* queue. connected_item points to the corresponding cache_policy_item_ in
* another policy queue.
*/
struct cache_policy_item_
{
char *key;
size_t key_size;
size_t request_count;
struct timeval last_request_time;
struct timeval creation_time;
struct cache_policy_item_ *connected_item;
};
/*
* cache_policy_ represents an abstract policy queue. It can be customized by
* setting appropriate function pointers
*/
struct cache_policy_
{
struct cache_policy_item_* (*create_item_func)();
void (*destroy_item_func)(struct cache_policy_item_ *);
void (*add_item_func)(struct cache_policy_ *,
struct cache_policy_item_ *);
void (*remove_item_func)(struct cache_policy_ *,
struct cache_policy_item_ *);
void (*update_item_func)(struct cache_policy_ *,
struct cache_policy_item_ *);
struct cache_policy_item_ *(*get_first_item_func)(
struct cache_policy_ *);
struct cache_policy_item_ *(*get_last_item_func)(
struct cache_policy_ *);
struct cache_policy_item_ *(*get_next_item_func)(
struct cache_policy_ *, struct cache_policy_item_ *);
struct cache_policy_item_ *(*get_prev_item_func)(
struct cache_policy_ *, struct cache_policy_item_ *);
};
/*
* LFU cache policy item "inherited" from cache_policy_item_ structure
*/
struct cache_lfu_policy_item_
{
struct cache_policy_item_ parent_data;
int frequency;
TAILQ_ENTRY(cache_lfu_policy_item_) entries;
};
TAILQ_HEAD(cache_lfu_policy_group_, cache_lfu_policy_item_);
/*
* LFU policy queue "inherited" from cache_policy_.
*/
struct cache_lfu_policy_
{
struct cache_policy_ parent_data;
struct cache_lfu_policy_group_ groups[CACHELIB_MAX_FREQUENCY];
};
/*
* LRU and FIFO policies item "inherited" from cache_policy_item_
*/
struct cache_queue_policy_item_
{
struct cache_policy_item_ parent_data;
TAILQ_ENTRY(cache_queue_policy_item_) entries;
};
/*
* LRU and FIFO policies "inherited" from cache_policy_
*/
struct cache_queue_policy_
{
struct cache_policy_ parent_data;
TAILQ_HEAD(cache_queue_policy_head_, cache_queue_policy_item_) head;
};
typedef struct cache_queue_policy_ cache_fifo_policy_;
typedef struct cache_queue_policy_ cache_lru_policy_;
/* fifo policy routines */
extern struct cache_policy_ *init_cache_fifo_policy();
extern void destroy_cache_fifo_policy(struct cache_policy_ *);
/* lru policy routines */
extern struct cache_policy_ *init_cache_lru_policy();
extern void destroy_cache_lru_policy(struct cache_policy_ *);
/* lfu policy routines */
extern struct cache_policy_ *init_cache_lfu_policy();
extern void destroy_cache_lfu_policy(struct cache_policy_ *);
#endif
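A small, self-contained sketch of how these policy objects are meant to be driven, based only on the declarations above and on the LRU behaviour implemented in cacheplcs.c (updated items move to the tail of the queue). In the daemon itself cachelib.c performs these calls while maintaining the hash table; the keys used here are arbitrary:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "cacheplcs.h"

int
main(void)
{
	struct cache_policy_ *policy;
	struct cache_policy_item_ *item, *victim;
	const char *keys[] = { "first", "second", "third" };
	int i;

	policy = init_cache_lru_policy();

	/* one policy item is kept per cached element */
	for (i = 0; i < 3; ++i) {
		item = policy->create_item_func();
		item->key = strdup(keys[i]);
		item->key_size = strlen(keys[i]) + 1;
		policy->add_item_func(policy, item);
	}

	/* a hit on the oldest element moves it to the tail under LRU */
	item = policy->get_first_item_func(policy);
	policy->update_item_func(policy, item);

	/* the head of the queue is now the best eviction candidate */
	victim = policy->get_first_item_func(policy);
	printf("evict: %s\n", victim->key);	/* prints "second" */
	policy->remove_item_func(policy, victim);
	free(victim->key);
	policy->destroy_item_func(victim);

	/* remaining item keys are intentionally leaked in this toy example */
	destroy_cache_lru_policy(policy);
	return (0);
}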

View File

@ -1,588 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "config.h"
#include "debug.h"
#include "log.h"
/*
* Default entries, which always exist in the configuration
*/
const char *c_default_entries[6] = {
NSDB_PASSWD,
NSDB_GROUP,
NSDB_HOSTS,
NSDB_SERVICES,
NSDB_PROTOCOLS,
NSDB_RPC
};
static int configuration_entry_cmp(const void *, const void *);
static int configuration_entry_sort_cmp(const void *, const void *);
static int configuration_entry_cache_mp_sort_cmp(const void *, const void *);
static int configuration_entry_cache_mp_cmp(const void *, const void *);
static int configuration_entry_cache_mp_part_cmp(const void *, const void *);
static struct configuration_entry *create_configuration_entry(const char *,
struct timeval const *, struct timeval const *,
struct common_cache_entry_params const *,
struct common_cache_entry_params const *,
struct mp_cache_entry_params const *);
static int
configuration_entry_sort_cmp(const void *e1, const void *e2)
{
return (strcmp((*((struct configuration_entry **)e1))->name,
(*((struct configuration_entry **)e2))->name
));
}
static int
configuration_entry_cmp(const void *e1, const void *e2)
{
return (strcmp((const char *)e1,
(*((struct configuration_entry **)e2))->name
));
}
static int
configuration_entry_cache_mp_sort_cmp(const void *e1, const void *e2)
{
return (strcmp((*((cache_entry *)e1))->params->entry_name,
(*((cache_entry *)e2))->params->entry_name
));
}
static int
configuration_entry_cache_mp_cmp(const void *e1, const void *e2)
{
return (strcmp((const char *)e1,
(*((cache_entry *)e2))->params->entry_name
));
}
static int
configuration_entry_cache_mp_part_cmp(const void *e1, const void *e2)
{
return (strncmp((const char *)e1,
(*((cache_entry *)e2))->params->entry_name,
strlen((const char *)e1)
));
}
static struct configuration_entry *
create_configuration_entry(const char *name,
struct timeval const *common_timeout,
struct timeval const *mp_timeout,
struct common_cache_entry_params const *positive_params,
struct common_cache_entry_params const *negative_params,
struct mp_cache_entry_params const *mp_params)
{
struct configuration_entry *retval;
size_t size;
int res;
TRACE_IN(create_configuration_entry);
assert(name != NULL);
assert(positive_params != NULL);
assert(negative_params != NULL);
assert(mp_params != NULL);
retval = (struct configuration_entry *)malloc(
sizeof(struct configuration_entry));
assert(retval != NULL);
memset(retval, 0, sizeof(struct configuration_entry));
res = pthread_mutex_init(&retval->positive_cache_lock, NULL);
if (res != 0) {
free(retval);
LOG_ERR_2("create_configuration_entry",
"can't create positive cache lock");
TRACE_OUT(create_configuration_entry);
return (NULL);
}
res = pthread_mutex_init(&retval->negative_cache_lock, NULL);
if (res != 0) {
pthread_mutex_destroy(&retval->positive_cache_lock);
free(retval);
LOG_ERR_2("create_configuration_entry",
"can't create negative cache lock");
TRACE_OUT(create_configuration_entry);
return (NULL);
}
res = pthread_mutex_init(&retval->mp_cache_lock, NULL);
if (res != 0) {
pthread_mutex_destroy(&retval->positive_cache_lock);
pthread_mutex_destroy(&retval->negative_cache_lock);
free(retval);
LOG_ERR_2("create_configuration_entry",
"can't create negative cache lock");
TRACE_OUT(create_configuration_entry);
return (NULL);
}
memcpy(&retval->positive_cache_params, positive_params,
sizeof(struct common_cache_entry_params));
memcpy(&retval->negative_cache_params, negative_params,
sizeof(struct common_cache_entry_params));
memcpy(&retval->mp_cache_params, mp_params,
sizeof(struct mp_cache_entry_params));
size = strlen(name);
retval->name = (char *)malloc(size + 1);
assert(retval->name != NULL);
memset(retval->name, 0, size + 1);
memcpy(retval->name, name, size);
memcpy(&retval->common_query_timeout, common_timeout,
sizeof(struct timeval));
memcpy(&retval->mp_query_timeout, mp_timeout,
sizeof(struct timeval));
asprintf(&retval->positive_cache_params.entry_name, "%s+", name);
assert(retval->positive_cache_params.entry_name != NULL);
asprintf(&retval->negative_cache_params.entry_name, "%s-", name);
assert(retval->negative_cache_params.entry_name != NULL);
asprintf(&retval->mp_cache_params.entry_name, "%s*", name);
assert(retval->mp_cache_params.entry_name != NULL);
TRACE_OUT(create_configuration_entry);
return (retval);
}
/*
* Creates configuration entry and fills it with default values
*/
struct configuration_entry *
create_def_configuration_entry(const char *name)
{
struct common_cache_entry_params positive_params, negative_params;
struct mp_cache_entry_params mp_params;
struct timeval default_common_timeout, default_mp_timeout;
struct configuration_entry *res = NULL;
TRACE_IN(create_def_configuration_entry);
memset(&positive_params, 0,
sizeof(struct common_cache_entry_params));
positive_params.entry_type = CET_COMMON;
positive_params.cache_entries_size = DEFAULT_CACHE_HT_SIZE;
positive_params.max_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE;
positive_params.satisf_elemsize = DEFAULT_POSITIVE_ELEMENTS_SIZE / 2;
positive_params.max_lifetime.tv_sec = DEFAULT_POSITIVE_LIFETIME;
positive_params.policy = CPT_LRU;
memcpy(&negative_params, &positive_params,
sizeof(struct common_cache_entry_params));
negative_params.max_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE;
negative_params.satisf_elemsize = DEFAULT_NEGATIVE_ELEMENTS_SIZE / 2;
negative_params.max_lifetime.tv_sec = DEFAULT_NEGATIVE_LIFETIME;
negative_params.policy = CPT_FIFO;
memset(&default_common_timeout, 0, sizeof(struct timeval));
default_common_timeout.tv_sec = DEFAULT_COMMON_ENTRY_TIMEOUT;
memset(&default_mp_timeout, 0, sizeof(struct timeval));
default_mp_timeout.tv_sec = DEFAULT_MP_ENTRY_TIMEOUT;
memset(&mp_params, 0,
sizeof(struct mp_cache_entry_params));
mp_params.entry_type = CET_MULTIPART;
mp_params.max_elemsize = DEFAULT_MULTIPART_ELEMENTS_SIZE;
mp_params.max_sessions = DEFAULT_MULITPART_SESSIONS_SIZE;
mp_params.max_lifetime.tv_sec = DEFAULT_MULITPART_LIFETIME;
res = create_configuration_entry(name, &default_common_timeout,
&default_mp_timeout, &positive_params, &negative_params,
&mp_params);
TRACE_OUT(create_def_configuration_entry);
return (res);
}
void
destroy_configuration_entry(struct configuration_entry *entry)
{
TRACE_IN(destroy_configuration_entry);
assert(entry != NULL);
pthread_mutex_destroy(&entry->positive_cache_lock);
pthread_mutex_destroy(&entry->negative_cache_lock);
pthread_mutex_destroy(&entry->mp_cache_lock);
free(entry->name);
free(entry->positive_cache_params.entry_name);
free(entry->negative_cache_params.entry_name);
free(entry->mp_cache_params.entry_name);
free(entry->mp_cache_entries);
free(entry);
TRACE_OUT(destroy_configuration_entry);
}
int
add_configuration_entry(struct configuration *config,
struct configuration_entry *entry)
{
TRACE_IN(add_configuration_entry);
assert(entry != NULL);
assert(entry->name != NULL);
if (configuration_find_entry(config, entry->name) != NULL) {
TRACE_OUT(add_configuration_entry);
return (-1);
}
if (config->entries_size == config->entries_capacity) {
struct configuration_entry **new_entries;
config->entries_capacity *= 2;
new_entries = (struct configuration_entry **)malloc(
sizeof(struct configuration_entry *) *
config->entries_capacity);
assert(new_entries != NULL);
memset(new_entries, 0, sizeof(struct configuration_entry *) *
config->entries_capacity);
memcpy(new_entries, config->entries,
sizeof(struct configuration_entry *) *
config->entries_size);
free(config->entries);
config->entries = new_entries;
}
config->entries[config->entries_size++] = entry;
qsort(config->entries, config->entries_size,
sizeof(struct configuration_entry *),
configuration_entry_sort_cmp);
TRACE_OUT(add_configuration_entry);
return (0);
}
size_t
configuration_get_entries_size(struct configuration *config)
{
TRACE_IN(configuration_get_entries_size);
assert(config != NULL);
TRACE_OUT(configuration_get_entries_size);
return (config->entries_size);
}
struct configuration_entry *
configuration_get_entry(struct configuration *config, size_t index)
{
TRACE_IN(configuration_get_entry);
assert(config != NULL);
assert(index < config->entries_size);
TRACE_OUT(configuration_get_entry);
return (config->entries[index]);
}
struct configuration_entry *
configuration_find_entry(struct configuration *config,
const char *name)
{
struct configuration_entry **retval;
TRACE_IN(configuration_find_entry);
retval = bsearch(name, config->entries, config->entries_size,
sizeof(struct configuration_entry *), configuration_entry_cmp);
TRACE_OUT(configuration_find_entry);
return ((retval != NULL) ? *retval : NULL);
}
/*
 * All multipart cache entries belonging to a configuration_entry are kept in
 * an array sorted by name. The three functions below manage this array.
 */
int
configuration_entry_add_mp_cache_entry(struct configuration_entry *config_entry,
cache_entry c_entry)
{
cache_entry *new_mp_entries, *old_mp_entries;
TRACE_IN(configuration_entry_add_mp_cache_entry);
++config_entry->mp_cache_entries_size;
new_mp_entries = (cache_entry *)malloc(sizeof(cache_entry) *
config_entry->mp_cache_entries_size);
assert(new_mp_entries != NULL);
new_mp_entries[0] = c_entry;
if (config_entry->mp_cache_entries_size - 1 > 0) {
memcpy(new_mp_entries + 1,
config_entry->mp_cache_entries,
(config_entry->mp_cache_entries_size - 1) *
sizeof(cache_entry));
}
old_mp_entries = config_entry->mp_cache_entries;
config_entry->mp_cache_entries = new_mp_entries;
free(old_mp_entries);
qsort(config_entry->mp_cache_entries,
config_entry->mp_cache_entries_size,
sizeof(cache_entry),
configuration_entry_cache_mp_sort_cmp);
TRACE_OUT(configuration_entry_add_mp_cache_entry);
return (0);
}
cache_entry
configuration_entry_find_mp_cache_entry(
struct configuration_entry *config_entry, const char *mp_name)
{
cache_entry *result;
TRACE_IN(configuration_entry_find_mp_cache_entry);
result = bsearch(mp_name, config_entry->mp_cache_entries,
config_entry->mp_cache_entries_size,
sizeof(cache_entry), configuration_entry_cache_mp_cmp);
if (result == NULL) {
TRACE_OUT(configuration_entry_find_mp_cache_entry);
return (NULL);
} else {
TRACE_OUT(configuration_entry_find_mp_cache_entry);
return (*result);
}
}
/*
 * Searches for all multipart entries whose names start with mp_name.
 * This is needed for cache flushing.  A usage sketch follows the function
 * below.
 */
int
configuration_entry_find_mp_cache_entries(
struct configuration_entry *config_entry, const char *mp_name,
cache_entry **start, cache_entry **finish)
{
cache_entry *result;
TRACE_IN(configuration_entry_find_mp_cache_entries);
result = bsearch(mp_name, config_entry->mp_cache_entries,
config_entry->mp_cache_entries_size,
sizeof(cache_entry), configuration_entry_cache_mp_part_cmp);
if (result == NULL) {
TRACE_OUT(configuration_entry_find_mp_cache_entries);
return (-1);
}
*start = result;
*finish = result + 1;
while (*start != config_entry->mp_cache_entries) {
if (configuration_entry_cache_mp_part_cmp(mp_name, *start - 1) == 0)
*start = *start - 1;
else
break;
}
while (*finish != config_entry->mp_cache_entries +
config_entry->mp_cache_entries_size) {
if (configuration_entry_cache_mp_part_cmp(
mp_name, *finish) == 0)
*finish = *finish + 1;
else
break;
}
TRACE_OUT(configuration_entry_find_mp_cache_entries);
return (0);
}
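/*
 * Hedged usage sketch (not part of the original file): one way a caller
 * could walk the half-open [start, finish) range produced by the function
 * above when flushing every multipart cache entry whose name begins with
 * mp_name.  flush_one_entry() is a hypothetical stand-in for the actual
 * flushing routine, which lives elsewhere in the daemon.
 */
#ifdef MP_FLUSH_USAGE_EXAMPLE
static void
flush_matching_mp_entries(struct configuration_entry *config_entry,
	const char *mp_name, void (*flush_one_entry)(cache_entry))
{
	cache_entry *start, *finish, *it;

	if (configuration_entry_find_mp_cache_entries(config_entry, mp_name,
	    &start, &finish) != 0)
		return;	/* no multipart entries match mp_name */

	for (it = start; it != finish; ++it)
		flush_one_entry(*it);
}
#endif /* MP_FLUSH_USAGE_EXAMPLE */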
/*
 * The configuration structure uses an rwlock to serialize access to its
 * fields.
 */
void
configuration_lock_rdlock(struct configuration *config)
{
TRACE_IN(configuration_lock_rdlock);
pthread_rwlock_rdlock(&config->rwlock);
TRACE_OUT(configuration_lock_rdlock);
}
void
configuration_lock_wrlock(struct configuration *config)
{
TRACE_IN(configuration_lock_wrlock);
pthread_rwlock_wrlock(&config->rwlock);
TRACE_OUT(configuration_lock_wrlock);
}
void
configuration_unlock(struct configuration *config)
{
TRACE_IN(configuration_unlock);
pthread_rwlock_unlock(&config->rwlock);
TRACE_OUT(configuration_unlock);
}
/*
 * Each configuration entry uses three mutexes to protect its cache
 * operations.  They are acquired and released by the
 * configuration_lock_entry and configuration_unlock_entry functions;
 * a usage sketch follows these two functions.
 */
void
configuration_lock_entry(struct configuration_entry *entry,
enum config_entry_lock_type lock_type)
{
TRACE_IN(configuration_lock_entry);
assert(entry != NULL);
switch (lock_type) {
case CELT_POSITIVE:
pthread_mutex_lock(&entry->positive_cache_lock);
break;
case CELT_NEGATIVE:
pthread_mutex_lock(&entry->negative_cache_lock);
break;
case CELT_MULTIPART:
pthread_mutex_lock(&entry->mp_cache_lock);
break;
default:
/* should be unreachable */
break;
}
TRACE_OUT(configuration_lock_entry);
}
void
configuration_unlock_entry(struct configuration_entry *entry,
enum config_entry_lock_type lock_type)
{
TRACE_IN(configuration_unlock_entry);
assert(entry != NULL);
switch (lock_type) {
case CELT_POSITIVE:
pthread_mutex_unlock(&entry->positive_cache_lock);
break;
case CELT_NEGATIVE:
pthread_mutex_unlock(&entry->negative_cache_lock);
break;
case CELT_MULTIPART:
pthread_mutex_unlock(&entry->mp_cache_lock);
break;
default:
/* should be unreachable */
break;
}
TRACE_OUT(configuration_unlock_entry);
}
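/*
 * Hedged usage sketch (not part of the original file): the intended pairing
 * of the two functions above around a cache operation.  The multipart
 * query-processing code uses exactly this pattern with CELT_MULTIPART.
 */
#ifdef ENTRY_LOCK_USAGE_EXAMPLE
static void
entry_lock_usage(struct configuration_entry *entry)
{

	configuration_lock_entry(entry, CELT_POSITIVE);
	/* ... operate on entry->positive_cache_entry here ... */
	configuration_unlock_entry(entry, CELT_POSITIVE);
}
#endif /* ENTRY_LOCK_USAGE_EXAMPLE */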
struct configuration *
init_configuration(void)
{
struct configuration *retval;
TRACE_IN(init_configuration);
retval = (struct configuration *)malloc(sizeof(struct configuration));
assert(retval != NULL);
memset(retval, 0, sizeof(struct configuration));
retval->entries_capacity = INITIAL_ENTRIES_CAPACITY;
retval->entries = (struct configuration_entry **)malloc(
sizeof(struct configuration_entry *) *
retval->entries_capacity);
assert(retval->entries != NULL);
memset(retval->entries, 0, sizeof(struct configuration_entry *) *
retval->entries_capacity);
pthread_rwlock_init(&retval->rwlock, NULL);
TRACE_OUT(init_configuration);
return (retval);
}
void
fill_configuration_defaults(struct configuration *config)
{
size_t len, i;
TRACE_IN(fill_configuration_defaults);
assert(config != NULL);
	if (config->socket_path != NULL)
		free(config->socket_path);
	len = strlen(DEFAULT_SOCKET_PATH);
	config->socket_path = (char *)malloc(len + 1);
	assert(config->socket_path != NULL);
	memset(config->socket_path, 0, len + 1);
	memcpy(config->socket_path, DEFAULT_SOCKET_PATH, len);
	if (config->pidfile_path != NULL)
		free(config->pidfile_path);
	len = strlen(DEFAULT_PIDFILE_PATH);
config->pidfile_path = (char *)malloc(len + 1);
assert(config->pidfile_path != NULL);
memset(config->pidfile_path, 0, len + 1);
memcpy(config->pidfile_path, DEFAULT_PIDFILE_PATH, len);
config->socket_mode = S_IFSOCK | S_IRUSR | S_IWUSR |
S_IRGRP | S_IWGRP | S_IROTH | S_IWOTH;
config->force_unlink = 1;
config->query_timeout = DEFAULT_QUERY_TIMEOUT;
config->threads_num = DEFAULT_THREADS_NUM;
for (i = 0; i < config->entries_size; ++i)
destroy_configuration_entry(config->entries[i]);
config->entries_size = 0;
TRACE_OUT(fill_configuration_defaults);
}
void
destroy_configuration(struct configuration *config)
{
	size_t i;
TRACE_IN(destroy_configuration);
assert(config != NULL);
free(config->pidfile_path);
free(config->socket_path);
for (i = 0; i < config->entries_size; ++i)
destroy_configuration_entry(config->entries[i]);
free(config->entries);
pthread_rwlock_destroy(&config->rwlock);
free(config);
TRACE_OUT(destroy_configuration);
}
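/*
 * Hedged usage sketch (not part of the original file): the typical lifecycle
 * of the configuration objects defined above, roughly as daemon startup code
 * might drive it.  The entry name "passwd" is only an example.
 */
#ifdef CONFIGURATION_USAGE_EXAMPLE
static void
configuration_usage(void)
{
	struct configuration *config;
	struct configuration_entry *entry;

	config = init_configuration();
	fill_configuration_defaults(config);

	/* Register a cache entry with default parameters. */
	entry = create_def_configuration_entry("passwd");
	if (add_configuration_entry(config, entry) != 0)
		destroy_configuration_entry(entry);

	/* Lookups use bsearch() over the name-sorted entries array. */
	entry = configuration_find_entry(config, "passwd");
	assert(entry != NULL);

	destroy_configuration(config);
}
#endif /* CONFIGURATION_USAGE_EXAMPLE */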

View File

@ -1,156 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_CONFIG_H__
#define __CACHED_CONFIG_H__
#include <sys/stat.h>
#include <sys/types.h>
#include <pthread.h>
#include <nsswitch.h>
#include <unistd.h>
#include "cachelib.h"
#define DEFAULT_QUERY_TIMEOUT 8
#define DEFAULT_THREADS_NUM 8
#define DEFAULT_COMMON_ENTRY_TIMEOUT 10
#define DEFAULT_MP_ENTRY_TIMEOUT 60
#define DEFAULT_CACHE_HT_SIZE 257
#define INITIAL_ENTRIES_CAPACITY 8
#define DEFAULT_SOCKET_PATH "/var/run/cached"
#define DEFAULT_PIDFILE_PATH "/var/run/cached.pid"
#define DEFAULT_POSITIVE_ELEMENTS_SIZE (2048)
#define DEFAULT_POSITIVE_LIFETIME (3600)
#define DEFAULT_NEGATIVE_ELEMENTS_SIZE (2048)
#define DEFAULT_NEGATIVE_LIFETIME (60)
#define DEFAULT_MULTIPART_ELEMENTS_SIZE (1024 * 8)
#define DEFAULT_MULITPART_SESSIONS_SIZE (1024)
#define DEFAULT_MULITPART_LIFETIME (3600)
extern const char *c_default_entries[6];
/*
 * A configuration entry holds the details of one cache entry from the
 * config file (e.g. passwd or group).  It also owns locks of three different
 * types (for ordinary read/write caching, for multipart caching and for
 * caching of negative results) for that cache entry.
 */
struct configuration_entry {
struct common_cache_entry_params positive_cache_params;
struct common_cache_entry_params negative_cache_params;
struct mp_cache_entry_params mp_cache_params;
	/*
	 * configuration_entry holds pointers to all of the actual
	 * cache_entries used for it: one for positive caching, one for
	 * negative caching, and several (one per euid/egid pair) for
	 * multipart caching.
	 */
cache_entry positive_cache_entry;
cache_entry negative_cache_entry;
cache_entry *mp_cache_entries;
size_t mp_cache_entries_size;
struct timeval common_query_timeout;
struct timeval mp_query_timeout;
char *name;
pthread_mutex_t positive_cache_lock;
pthread_mutex_t negative_cache_lock;
pthread_mutex_t mp_cache_lock;
int perform_actual_lookups;
int enabled;
};
/*
 * Contains the global configuration options and the array of all
 * configuration entries.
 */
struct configuration {
char *pidfile_path;
char *socket_path;
struct configuration_entry **entries;
size_t entries_capacity;
size_t entries_size;
pthread_rwlock_t rwlock;
mode_t socket_mode;
int force_unlink;
int query_timeout;
int threads_num;
};
enum config_entry_lock_type {
CELT_POSITIVE,
CELT_NEGATIVE,
CELT_MULTIPART
};
extern struct configuration *init_configuration(void);
extern void destroy_configuration(struct configuration *);
extern void fill_configuration_defaults(struct configuration *);
extern int add_configuration_entry(struct configuration *,
struct configuration_entry *);
extern struct configuration_entry *create_def_configuration_entry(
const char *);
extern void destroy_configuration_entry(struct configuration_entry *);
extern size_t configuration_get_entries_size(struct configuration *);
extern struct configuration_entry *configuration_get_entry(
struct configuration *, size_t);
extern struct configuration_entry *configuration_find_entry(
struct configuration *, const char *);
extern int configuration_entry_add_mp_cache_entry(struct configuration_entry *,
cache_entry);
extern cache_entry configuration_entry_find_mp_cache_entry(
struct configuration_entry *,
const char *);
extern int configuration_entry_find_mp_cache_entries(
struct configuration_entry *, const char *, cache_entry **,
cache_entry **);
extern void configuration_lock_rdlock(struct configuration *config);
extern void configuration_lock_wrlock(struct configuration *config);
extern void configuration_unlock(struct configuration *config);
extern void configuration_lock_entry(struct configuration_entry *,
enum config_entry_lock_type);
extern void configuration_unlock_entry(struct configuration_entry *,
enum config_entry_lock_type);
#endif

View File

@ -1,149 +0,0 @@
/*-
* Copyright (c) 2004 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <stdio.h>
#include "debug.h"
static int trace_level = 0;
static int trace_level_bk = 0;
void
__trace_in(const char *s, const char *f, int l)
{
int i;
if (trace_level < TRACE_WANTED)
{
for (i = 0; i < trace_level; ++i)
printf("\t");
printf("=> %s\n", s);
}
++trace_level;
}
void
__trace_point(const char *f, int l)
{
int i;
if (trace_level < TRACE_WANTED)
{
for (i = 0; i < trace_level - 1; ++i)
printf("\t");
printf("= %s: %d\n", f, l);
}
}
void
__trace_msg(const char *msg, const char *f, int l)
{
int i;
if (trace_level < TRACE_WANTED)
{
for (i = 0; i < trace_level - 1; ++i)
printf("\t");
printf("= MSG %s, %s: %d\n", msg, f, l);
}
}
void
__trace_ptr(const char *desc, const void *p, const char *f, int l)
{
int i;
if (trace_level < TRACE_WANTED)
{
for (i = 0; i < trace_level - 1; ++i)
printf("\t");
printf("= PTR %s: %p, %s: %d\n", desc, p, f, l);
}
}
void
__trace_int(const char *desc, int i, const char *f, int l)
{
int j;
if (trace_level < TRACE_WANTED)
{
for (j = 0; j < trace_level - 1; ++j)
printf("\t");
printf("= INT %s: %i, %s: %d\n",desc, i, f, l);
}
}
void
__trace_str(const char *desc, const char *s, const char *f, int l)
{
int i;
if (trace_level < TRACE_WANTED)
{
for (i = 0; i < trace_level - 1; ++i)
printf("\t");
printf("= STR %s: '%s', %s: %d\n", desc, s, f, l);
}
}
void
__trace_out(const char *s, const char *f, int l)
{
int i;
--trace_level;
if (trace_level < TRACE_WANTED)
{
for (i = 0; i < trace_level; ++i)
printf("\t");
printf("<= %s\n", s);
}
}
void
__trace_on()
{
trace_level = trace_level_bk;
trace_level_bk = 0;
}
void
__trace_off()
{
trace_level_bk = trace_level;
trace_level = 1024;
}

View File

@ -1,67 +0,0 @@
/*-
* Copyright (c) 2004 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_DEBUG_H__
#define __CACHED_DEBUG_H__
#define TRACE_WANTED 32
/* #ifndef NDEBUG */
#if 0
#define TRACE_IN(x) __trace_in(#x, __FILE__, __LINE__)
#define TRACE_POINT() __trace_point(__FILE__, __LINE__)
#define TRACE_MSG(x) __trace_msg(x, __FILE__, __LINE__)
#define TRACE_PTR(p) __trace_ptr(#p, p, __FILE__, __LINE__)
#define TRACE_INT(i) __trace_int(#i, i, __FILE__, __LINE__)
#define TRACE_STR(s) __trace_str(#s, s, __FILE__, __LINE__)
#define TRACE_OUT(x) __trace_out(#x, __FILE__, __LINE__)
#define TRACE_ON() __trace_on()
#define TRACE_OFF() __trace_off()
#else
#define TRACE_IN(x)
#define TRACE_POINT()
#define TRACE_MSG(x)
#define TRACE_PTR(p)
#define TRACE_INT(i)
#define TRACE_STR(s)
#define TRACE_OUT(x)
#define TRACE_ON()
#define TRACE_OFF()
#endif
extern void __trace_in(const char *, const char *, int);
extern void __trace_point(const char *, int);
extern void __trace_msg(const char *, const char *, int);
extern void __trace_ptr(const char *, const void *, const char *, int);
extern void __trace_int(const char *, int, const char *, int);
extern void __trace_str(const char *, const char *, const char *, int);
extern void __trace_out(const char *, const char *, int);
extern void __trace_on();
extern void __trace_off();
#endif
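/*
 * Illustrative sketch (not part of the original header): a function
 * instrumented with the macros above.  With the "#if 0" changed to
 * "#ifndef NDEBUG", nested calls are printed indented up to TRACE_WANTED
 * levels; as shipped, the macros expand to nothing.
 */
#ifdef DEBUG_USAGE_EXAMPLE
static int
traced_example(int arg)
{

	TRACE_IN(traced_example);
	TRACE_INT(arg);
	TRACE_OUT(traced_example);
	return (arg);
}
#endif /* DEBUG_USAGE_EXAMPLE */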

View File

@ -1,218 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHELIB_HASHTABLE_H__
#define __CACHELIB_HASHTABLE_H__
#include <search.h>
#include <string.h>
#define HASHTABLE_INITIAL_ENTRIES_CAPACITY 8
typedef int hashtable_index_t;
/*
 * This file contains queue.h-like macro definitions for hash tables.
 * A hash table is organized as an array, of the specified size, of
 * user-defined structures (declared with HASHTABLE_ENTRY_HEAD).  Each hash
 * table entry (user-defined structure) stores its elements in a sorted
 * array.  You can place elements into the hash table, retrieve elements
 * with a specified key, traverse all elements, and delete them.  New
 * elements are placed into the hash table using the comparison and hashing
 * functions provided by the user.  An illustrative usage sketch follows at
 * the end of this header.
 */
/*
 * Defines the hash table entry structure, which uses the specified type of
 * elements.
 */
#define HASHTABLE_ENTRY_HEAD(name, type) struct name { \
type *values; \
size_t capacity; \
size_t size; \
}
/*
 * Defines the hash table structure, which uses the specified type of entries.
 * The only restriction on entries is that they must contain a field defined
 * with the HASHTABLE_ENTRY_HEAD macro.
 */
#define HASHTABLE_HEAD(name, entry) struct name { \
struct entry *entries; \
size_t entries_size; \
}
#define HASHTABLE_ENTRIES_COUNT(table) ((table)->entries_size)
/*
 * Unlike most queue.h data types, hash tables cannot be initialized
 * statically, so there is no HASHTABLE_HEAD_INITIALIZED macro.
 */
#define HASHTABLE_INIT(table, type, field, _entries_size) \
do { \
hashtable_index_t var; \
(table)->entries = (void *)malloc( \
sizeof(*(table)->entries) * (_entries_size)); \
memset((table)->entries, 0, \
sizeof(*(table)->entries) * (_entries_size)); \
(table)->entries_size = (_entries_size); \
for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\
(table)->entries[var].field.capacity = \
HASHTABLE_INITIAL_ENTRIES_CAPACITY; \
(table)->entries[var].field.size = 0; \
(table)->entries[var].field.values = (type *)malloc(\
sizeof(type) * \
HASHTABLE_INITIAL_ENTRIES_CAPACITY); \
assert((table)->entries[var].field.values != NULL);\
} \
} while (0)
/*
* All initialized hashtables should be destroyed with this macro.
*/
#define HASHTABLE_DESTROY(table, field) \
do { \
hashtable_index_t var; \
for (var = 0; var < HASHTABLE_ENTRIES_COUNT(table); ++var) {\
free((table)->entries[var].field.values); \
} \
} while (0)
#define HASHTABLE_GET_ENTRY(table, hash) (&((table)->entries[hash]))
/*
* Traverses through all hash table entries
*/
#define HASHTABLE_FOREACH(table, var) \
for ((var) = &((table)->entries[0]); \
(var) < &((table)->entries[HASHTABLE_ENTRIES_COUNT(table)]);\
++(var))
/*
* Traverses through all elements of the specified hash table entry
*/
#define HASHTABLE_ENTRY_FOREACH(entry, field, var) \
for ((var) = &((entry)->field.values[0]); \
(var) < &((entry)->field.values[(entry)->field.size]); \
++(var))
#define HASHTABLE_ENTRY_CLEAR(entry, field) \
((entry)->field.size = 0)
#define HASHTABLE_ENTRY_SIZE(entry, field) \
((entry)->field.size)
#define HASHTABLE_ENTRY_CAPACITY(entry, field) \
((entry)->field.capacity)
/* do/while keeps each macro a single statement inside an unbraced if. */
#define HASHTABLE_ENTRY_CAPACITY_INCREASE(entry, field, type)		\
	do {								\
		(entry)->field.capacity *= 2;				\
		(entry)->field.values = (type *)realloc(		\
		    (entry)->field.values,				\
		    (entry)->field.capacity * sizeof(type));		\
	} while (0)
#define HASHTABLE_ENTRY_CAPACITY_DECREASE(entry, field, type)		\
	do {								\
		(entry)->field.capacity /= 2;				\
		(entry)->field.values = (type *)realloc(		\
		    (entry)->field.values,				\
		    (entry)->field.capacity * sizeof(type));		\
	} while (0)
/*
* Generates prototypes for the hash table functions
*/
#define HASHTABLE_PROTOTYPE(name, entry_, type) \
hashtable_index_t name##_CALCULATE_HASH(struct name *, type *); \
void name##_ENTRY_STORE(struct entry_*, type *); \
type *name##_ENTRY_FIND(struct entry_*, type *); \
type *name##_ENTRY_FIND_SPECIAL(struct entry_ *, type *, \
int (*) (const void *, const void *)); \
void name##_ENTRY_REMOVE(struct entry_*, type *);
/*
* Generates implementations of the hash table functions
*/
#define HASHTABLE_GENERATE(name, entry_, type, field, HASH, CMP) \
hashtable_index_t name##_CALCULATE_HASH(struct name *table, type *data) \
{ \
\
return HASH(data, table->entries_size); \
} \
\
void name##_ENTRY_STORE(struct entry_ *the_entry, type *data) \
{ \
\
if (the_entry->field.size == the_entry->field.capacity) \
HASHTABLE_ENTRY_CAPACITY_INCREASE(the_entry, field, type);\
\
memcpy(&(the_entry->field.values[the_entry->field.size++]), \
data, \
sizeof(type)); \
qsort(the_entry->field.values, the_entry->field.size, \
sizeof(type), CMP); \
} \
\
type *name##_ENTRY_FIND(struct entry_ *the_entry, type *key) \
{ \
\
return ((type *)bsearch(key, the_entry->field.values, \
the_entry->field.size, sizeof(type), CMP)); \
} \
\
type *name##_ENTRY_FIND_SPECIAL(struct entry_ *the_entry, type *key, \
int (*compar) (const void *, const void *)) \
{ \
return ((type *)bsearch(key, the_entry->field.values, \
the_entry->field.size, sizeof(type), compar)); \
} \
\
void name##_ENTRY_REMOVE(struct entry_ *the_entry, type *del_elm) \
{ \
\
memmove(del_elm, del_elm + 1, \
(&the_entry->field.values[--the_entry->field.size] - del_elm) *\
sizeof(type)); \
}
/*
 * The macro definitions below wrap the functions generated with the
 * HASHTABLE_GENERATE macro.  Use them instead of calling the generated
 * functions directly.
 */
#define HASHTABLE_CALCULATE_HASH(name, table, data) \
(name##_CALCULATE_HASH((table), data))
#define HASHTABLE_ENTRY_STORE(name, entry, data) \
name##_ENTRY_STORE((entry), data)
#define HASHTABLE_ENTRY_FIND(name, entry, key) \
(name##_ENTRY_FIND((entry), (key)))
#define HASHTABLE_ENTRY_FIND_SPECIAL(name, entry, key, cmp) \
(name##_ENTRY_FIND_SPECIAL((entry), (key), (cmp)))
#define HASHTABLE_ENTRY_REMOVE(name, entry, del_elm) \
name##_ENTRY_REMOVE((entry), (del_elm))
#endif
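/*
 * Illustrative usage sketch (not part of the original header).  It shows the
 * intended pattern: declare the entry and table structures, generate the
 * helper functions from user-supplied hash and comparison callbacks, then
 * store and look up elements.  All example_* identifiers below are
 * hypothetical.
 */
#ifdef HASHTABLE_USAGE_EXAMPLE
#include <assert.h>
#include <stdlib.h>
#include <string.h>

struct example_elm {
	const char *key;
	int value;
};

/* Each hash table slot keeps its elements in a sorted array. */
struct example_entry {
	HASHTABLE_ENTRY_HEAD(example_elm_head, struct example_elm) data;
};

HASHTABLE_HEAD(example_ht, example_entry);

static hashtable_index_t
example_hash(const void *p, size_t entries_size)
{
	const struct example_elm *elm = p;
	size_t h = 0;
	const char *c;

	for (c = elm->key; *c != '\0'; ++c)
		h = h * 31 + (unsigned char)*c;
	return ((hashtable_index_t)(h % entries_size));
}

static int
example_cmp(const void *a, const void *b)
{

	return (strcmp(((const struct example_elm *)a)->key,
	    ((const struct example_elm *)b)->key));
}

HASHTABLE_PROTOTYPE(example_ht, example_entry, struct example_elm)
HASHTABLE_GENERATE(example_ht, example_entry, struct example_elm, data,
    example_hash, example_cmp)

static void
example_usage(void)
{
	struct example_ht table;
	struct example_entry *entry;
	struct example_elm elm, *found;
	hashtable_index_t hash;

	HASHTABLE_INIT(&table, struct example_elm, data, 257);

	elm.key = "passwd";
	elm.value = 42;
	hash = HASHTABLE_CALCULATE_HASH(example_ht, &table, &elm);
	entry = HASHTABLE_GET_ENTRY(&table, hash);
	HASHTABLE_ENTRY_STORE(example_ht, entry, &elm);

	found = HASHTABLE_ENTRY_FIND(example_ht, entry, &elm);
	assert(found != NULL && found->value == 42);

	/* HASHTABLE_DESTROY frees the per-slot arrays, not the slot array. */
	HASHTABLE_DESTROY(&table, data);
	free(table.entries);
}
#endif /* HASHTABLE_USAGE_EXAMPLE */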

View File

@ -1,78 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <syslog.h>
#include "log.h"
void
__log_msg(int level, const char *sender, const char *message, ...)
{
va_list ap;
char *fmessage;
fmessage = NULL;
va_start(ap, message);
vasprintf(&fmessage, message, ap);
va_end(ap);
assert(fmessage != NULL);
printf("M%d from %s: %s\n", level, sender, fmessage);
#ifndef NO_SYSLOG
if (level == 0)
syslog(LOG_INFO, "cached message (from %s): %s", sender,
fmessage);
#endif
free(fmessage);
}
void
__log_err(int level, const char *sender, const char *error, ...)
{
va_list ap;
char *ferror;
ferror = NULL;
va_start(ap, error);
vasprintf(&ferror, error, ap);
va_end(ap);
assert(ferror != NULL);
printf("E%d from %s: %s\n", level, sender, ferror);
#ifndef NO_SYSLOG
if (level == 0)
syslog(LOG_ERR, "cached error (from %s): %s", sender, ferror);
#endif
free(ferror);
}

View File

@ -1,43 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_LOG_H__
#define __CACHED_LOG_H__
#define LOG_MSG_1(sender, msg, ...) __log_msg(1, sender, msg, ##__VA_ARGS__)
#define LOG_MSG_2(sender, msg, ...) __log_msg(2, sender, msg, ##__VA_ARGS__)
#define LOG_MSG_3(sender, msg, ...) __log_msg(3, sender, msg, ##__VA_ARGS__)
#define LOG_ERR_1(sender, err, ...) __log_err(1, sender, err, ##__VA_ARGS__)
#define LOG_ERR_2(sender, err, ...) __log_err(2, sender, err, ##__VA_ARGS__)
#define LOG_ERR_3(sender, err, ...) __log_err(3, sender, err, ##__VA_ARGS__)
extern void __log_msg(int, const char *, const char *, ...);
extern void __log_err(int, const char *, const char *, ...);
#endif
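/*
 * Illustrative sketch (not part of the original header): typical use of the
 * logging macros, following the pattern used by the query-processing code.
 * The sender string "log_usage" is arbitrary.
 */
#ifdef LOG_USAGE_EXAMPLE
static void
log_usage(int error_code)
{

	LOG_MSG_2("log_usage", "processing finished");
	if (error_code != 0)
		LOG_ERR_2("log_usage", "operation failed with code %d",
		    error_code);
}
#endif /* LOG_USAGE_EXAMPLE */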

View File

@ -1,537 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/event.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "query.h"
#include "mp_rs_query.h"
#include "mp_ws_query.h"
#include "singletons.h"
static int on_mp_read_session_close_notification(struct query_state *);
static void on_mp_read_session_destroy(struct query_state *);
static int on_mp_read_session_mapper(struct query_state *);
/* int on_mp_read_session_request_read1(struct query_state *); */
static int on_mp_read_session_request_read2(struct query_state *);
static int on_mp_read_session_request_process(struct query_state *);
static int on_mp_read_session_response_write1(struct query_state *);
static int on_mp_read_session_read_request_process(struct query_state *);
static int on_mp_read_session_read_response_write1(struct query_state *);
static int on_mp_read_session_read_response_write2(struct query_state *);
/*
 * This function is used as the query_state's destroy_func to perform proper
 * cleanup in case of errors.
 */
static void
on_mp_read_session_destroy(struct query_state *qstate)
{
TRACE_IN(on_mp_read_session_destroy);
finalize_comm_element(&qstate->request);
finalize_comm_element(&qstate->response);
if (qstate->mdata != NULL) {
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
close_cache_mp_read_session(
(cache_mp_read_session)qstate->mdata);
configuration_unlock_entry(qstate->config_entry,
CELT_MULTIPART);
}
TRACE_OUT(on_mp_read_session_destroy);
}
/*
* The functions below are used to process multipart read session initiation
* requests.
* - on_mp_read_session_request_read1 and on_mp_read_session_request_read2 read
* the request itself
* - on_mp_read_session_request_process processes it
* - on_mp_read_session_response_write1 sends the response
*/
int
on_mp_read_session_request_read1(struct query_state *qstate)
{
struct cache_mp_read_session_request *c_mp_rs_request;
ssize_t result;
TRACE_IN(on_mp_read_session_request_read1);
if (qstate->kevent_watermark == 0)
qstate->kevent_watermark = sizeof(size_t);
else {
init_comm_element(&qstate->request,
CET_MP_READ_SESSION_REQUEST);
c_mp_rs_request = get_cache_mp_read_session_request(
&qstate->request);
result = qstate->read_func(qstate,
&c_mp_rs_request->entry_length, sizeof(size_t));
if (result != sizeof(size_t)) {
TRACE_OUT(on_mp_read_session_request_read1);
return (-1);
}
if (BUFSIZE_INVALID(c_mp_rs_request->entry_length)) {
TRACE_OUT(on_mp_read_session_request_read1);
return (-1);
}
c_mp_rs_request->entry = (char *)malloc(
c_mp_rs_request->entry_length + 1);
assert(c_mp_rs_request->entry != NULL);
memset(c_mp_rs_request->entry, 0,
c_mp_rs_request->entry_length + 1);
qstate->kevent_watermark = c_mp_rs_request->entry_length;
qstate->process_func = on_mp_read_session_request_read2;
}
TRACE_OUT(on_mp_read_session_request_read1);
return (0);
}
static int
on_mp_read_session_request_read2(struct query_state *qstate)
{
struct cache_mp_read_session_request *c_mp_rs_request;
ssize_t result;
TRACE_IN(on_mp_read_session_request_read2);
c_mp_rs_request = get_cache_mp_read_session_request(&qstate->request);
result = qstate->read_func(qstate, c_mp_rs_request->entry,
c_mp_rs_request->entry_length);
if (result != qstate->kevent_watermark) {
LOG_ERR_3("on_mp_read_session_request_read2",
"read failed");
TRACE_OUT(on_mp_read_session_request_read2);
return (-1);
}
qstate->kevent_watermark = 0;
qstate->process_func = on_mp_read_session_request_process;
TRACE_OUT(on_mp_read_session_request_read2);
return (0);
}
static int
on_mp_read_session_request_process(struct query_state *qstate)
{
struct cache_mp_read_session_request *c_mp_rs_request;
struct cache_mp_read_session_response *c_mp_rs_response;
cache_mp_read_session rs;
cache_entry c_entry;
char *dec_cache_entry_name;
char *buffer;
size_t buffer_size;
cache_mp_write_session ws;
struct agent *lookup_agent;
struct multipart_agent *mp_agent;
void *mdata;
int res;
TRACE_IN(on_mp_read_session_request_process);
init_comm_element(&qstate->response, CET_MP_READ_SESSION_RESPONSE);
c_mp_rs_response = get_cache_mp_read_session_response(
&qstate->response);
c_mp_rs_request = get_cache_mp_read_session_request(&qstate->request);
qstate->config_entry = configuration_find_entry(
s_configuration, c_mp_rs_request->entry);
if (qstate->config_entry == NULL) {
c_mp_rs_response->error_code = ENOENT;
LOG_ERR_2("read_session_request",
"can't find configuration entry '%s'."
" aborting request", c_mp_rs_request->entry);
goto fin;
}
if (qstate->config_entry->enabled == 0) {
c_mp_rs_response->error_code = EACCES;
LOG_ERR_2("read_session_request",
"configuration entry '%s' is disabled",
c_mp_rs_request->entry);
goto fin;
}
if (qstate->config_entry->perform_actual_lookups != 0)
dec_cache_entry_name = strdup(
qstate->config_entry->mp_cache_params.entry_name);
else {
#ifdef NS_CACHED_EID_CHECKING
if (check_query_eids(qstate) != 0) {
c_mp_rs_response->error_code = EPERM;
goto fin;
}
#endif
asprintf(&dec_cache_entry_name, "%s%s", qstate->eid_str,
qstate->config_entry->mp_cache_params.entry_name);
}
assert(dec_cache_entry_name != NULL);
configuration_lock_rdlock(s_configuration);
c_entry = find_cache_entry(s_cache, dec_cache_entry_name);
configuration_unlock(s_configuration);
	if ((c_entry == INVALID_CACHE_ENTRY) &&
(qstate->config_entry->perform_actual_lookups != 0))
c_entry = register_new_mp_cache_entry(qstate,
dec_cache_entry_name);
free(dec_cache_entry_name);
if (c_entry != INVALID_CACHE_ENTRY) {
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
rs = open_cache_mp_read_session(c_entry);
configuration_unlock_entry(qstate->config_entry,
CELT_MULTIPART);
if ((rs == INVALID_CACHE_MP_READ_SESSION) &&
(qstate->config_entry->perform_actual_lookups != 0)) {
lookup_agent = find_agent(s_agent_table,
c_mp_rs_request->entry, MULTIPART_AGENT);
if ((lookup_agent != NULL) &&
(lookup_agent->type == MULTIPART_AGENT)) {
mp_agent = (struct multipart_agent *)
lookup_agent;
mdata = mp_agent->mp_init_func();
/*
* Multipart agents read the whole snapshot
* of the data at one time.
*/
configuration_lock_entry(qstate->config_entry,
CELT_MULTIPART);
ws = open_cache_mp_write_session(c_entry);
configuration_unlock_entry(qstate->config_entry,
CELT_MULTIPART);
if (ws != NULL) {
do {
buffer = NULL;
res = mp_agent->mp_lookup_func(&buffer,
&buffer_size,
mdata);
if ((res & NS_TERMINATE) &&
(buffer != NULL)) {
configuration_lock_entry(
qstate->config_entry,
CELT_MULTIPART);
if (cache_mp_write(ws, buffer,
buffer_size) != 0) {
abandon_cache_mp_write_session(ws);
ws = NULL;
}
configuration_unlock_entry(
qstate->config_entry,
CELT_MULTIPART);
free(buffer);
buffer = NULL;
} else {
configuration_lock_entry(
qstate->config_entry,
CELT_MULTIPART);
close_cache_mp_write_session(ws);
configuration_unlock_entry(
qstate->config_entry,
CELT_MULTIPART);
free(buffer);
buffer = NULL;
}
} while ((res & NS_TERMINATE) &&
(ws != NULL));
}
configuration_lock_entry(qstate->config_entry,
CELT_MULTIPART);
rs = open_cache_mp_read_session(c_entry);
configuration_unlock_entry(qstate->config_entry,
CELT_MULTIPART);
}
}
if (rs == INVALID_CACHE_MP_READ_SESSION)
c_mp_rs_response->error_code = -1;
else {
qstate->mdata = rs;
qstate->destroy_func = on_mp_read_session_destroy;
configuration_lock_entry(qstate->config_entry,
CELT_MULTIPART);
if ((qstate->config_entry->mp_query_timeout.tv_sec != 0) ||
(qstate->config_entry->mp_query_timeout.tv_usec != 0))
memcpy(&qstate->timeout,
&qstate->config_entry->mp_query_timeout,
sizeof(struct timeval));
configuration_unlock_entry(qstate->config_entry,
CELT_MULTIPART);
}
} else
c_mp_rs_response->error_code = -1;
fin:
qstate->process_func = on_mp_read_session_response_write1;
qstate->kevent_watermark = sizeof(int);
qstate->kevent_filter = EVFILT_WRITE;
TRACE_OUT(on_mp_read_session_request_process);
return (0);
}
static int
on_mp_read_session_response_write1(struct query_state *qstate)
{
struct cache_mp_read_session_response *c_mp_rs_response;
ssize_t result;
TRACE_IN(on_mp_read_session_response_write1);
c_mp_rs_response = get_cache_mp_read_session_response(
&qstate->response);
result = qstate->write_func(qstate, &c_mp_rs_response->error_code,
sizeof(int));
if (result != sizeof(int)) {
LOG_ERR_3("on_mp_read_session_response_write1",
"write failed");
TRACE_OUT(on_mp_read_session_response_write1);
return (-1);
}
if (c_mp_rs_response->error_code == 0) {
qstate->kevent_watermark = sizeof(int);
qstate->process_func = on_mp_read_session_mapper;
qstate->kevent_filter = EVFILT_READ;
} else {
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
}
TRACE_OUT(on_mp_read_session_response_write1);
return (0);
}
/*
 * The mapper function is used to avoid opening a new connection for each
 * session read or write request.  After processing a request, it does not
 * close the connection but waits for the next request.
 */
static int
on_mp_read_session_mapper(struct query_state *qstate)
{
ssize_t result;
int elem_type;
TRACE_IN(on_mp_read_session_mapper);
if (qstate->kevent_watermark == 0) {
qstate->kevent_watermark = sizeof(int);
} else {
result = qstate->read_func(qstate, &elem_type, sizeof(int));
if (result != sizeof(int)) {
LOG_ERR_3("on_mp_read_session_mapper",
"read failed");
TRACE_OUT(on_mp_read_session_mapper);
return (-1);
}
switch (elem_type) {
case CET_MP_READ_SESSION_READ_REQUEST:
qstate->kevent_watermark = 0;
qstate->process_func =
on_mp_read_session_read_request_process;
break;
case CET_MP_READ_SESSION_CLOSE_NOTIFICATION:
qstate->kevent_watermark = 0;
qstate->process_func =
on_mp_read_session_close_notification;
break;
default:
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
LOG_ERR_3("on_mp_read_session_mapper",
"unknown element type");
TRACE_OUT(on_mp_read_session_mapper);
return (-1);
}
}
TRACE_OUT(on_mp_read_session_mapper);
return (0);
}
/*
 * The functions below are used to process multipart read session read
 * requests.  The user does not have to pass any data besides the request
 * identifier itself, so no XXX_read functions are needed and processing
 * starts with the XXX_process function.
 * - on_mp_read_session_read_request_process processes the request
 * - on_mp_read_session_read_response_write1 and
 *   on_mp_read_session_read_response_write2 send the response
 */
static int
on_mp_read_session_read_request_process(struct query_state *qstate)
{
struct cache_mp_read_session_read_response *read_response;
	TRACE_IN(on_mp_read_session_read_request_process);
init_comm_element(&qstate->response, CET_MP_READ_SESSION_READ_RESPONSE);
read_response = get_cache_mp_read_session_read_response(
&qstate->response);
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
read_response->error_code = cache_mp_read(
(cache_mp_read_session)qstate->mdata, NULL,
&read_response->data_size);
if (read_response->error_code == 0) {
read_response->data = (char *)malloc(read_response->data_size);
		assert(read_response->data != NULL);
read_response->error_code = cache_mp_read(
(cache_mp_read_session)qstate->mdata,
read_response->data,
&read_response->data_size);
}
configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
if (read_response->error_code == 0)
qstate->kevent_watermark = sizeof(size_t) + sizeof(int);
else
qstate->kevent_watermark = sizeof(int);
qstate->process_func = on_mp_read_session_read_response_write1;
qstate->kevent_filter = EVFILT_WRITE;
	TRACE_OUT(on_mp_read_session_read_request_process);
return (0);
}
static int
on_mp_read_session_read_response_write1(struct query_state *qstate)
{
struct cache_mp_read_session_read_response *read_response;
ssize_t result;
TRACE_IN(on_mp_read_session_read_response_write1);
read_response = get_cache_mp_read_session_read_response(
&qstate->response);
result = qstate->write_func(qstate, &read_response->error_code,
sizeof(int));
if (read_response->error_code == 0) {
result += qstate->write_func(qstate, &read_response->data_size,
sizeof(size_t));
if (result != qstate->kevent_watermark) {
TRACE_OUT(on_mp_read_session_read_response_write1);
LOG_ERR_3("on_mp_read_session_read_response_write1",
"write failed");
return (-1);
}
qstate->kevent_watermark = read_response->data_size;
qstate->process_func = on_mp_read_session_read_response_write2;
} else {
if (result != qstate->kevent_watermark) {
LOG_ERR_3("on_mp_read_session_read_response_write1",
"write failed");
TRACE_OUT(on_mp_read_session_read_response_write1);
return (-1);
}
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
}
TRACE_OUT(on_mp_read_session_read_response_write1);
return (0);
}
static int
on_mp_read_session_read_response_write2(struct query_state *qstate)
{
struct cache_mp_read_session_read_response *read_response;
ssize_t result;
TRACE_IN(on_mp_read_session_read_response_write2);
read_response = get_cache_mp_read_session_read_response(
&qstate->response);
result = qstate->write_func(qstate, read_response->data,
read_response->data_size);
if (result != qstate->kevent_watermark) {
LOG_ERR_3("on_mp_read_session_read_response_write2",
"write failed");
TRACE_OUT(on_mp_read_session_read_response_write2);
return (-1);
}
finalize_comm_element(&qstate->request);
finalize_comm_element(&qstate->response);
qstate->kevent_watermark = sizeof(int);
qstate->process_func = on_mp_read_session_mapper;
qstate->kevent_filter = EVFILT_READ;
TRACE_OUT(on_mp_read_session_read_response_write2);
return (0);
}
/*
* Handles session close notification by calling close_cache_mp_read_session
* function.
*/
static int
on_mp_read_session_close_notification(struct query_state *qstate)
{
TRACE_IN(on_mp_read_session_close_notification);
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
close_cache_mp_read_session((cache_mp_read_session)qstate->mdata);
configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
qstate->mdata = NULL;
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
TRACE_OUT(on_mp_read_session_close_notification);
return (0);
}

View File

@ -1,34 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_MP_RS_QUERY_H__
#define __CACHED_MP_RS_QUERY_H__
extern int on_mp_read_session_request_read1(struct query_state *);
#endif

View File

@ -1,548 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/event.h>
#include <assert.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include "cachelib.h"
#include "config.h"
#include "debug.h"
#include "log.h"
#include "query.h"
#include "mp_ws_query.h"
#include "singletons.h"
static int on_mp_write_session_abandon_notification(struct query_state *);
static int on_mp_write_session_close_notification(struct query_state *);
static void on_mp_write_session_destroy(struct query_state *);
static int on_mp_write_session_mapper(struct query_state *);
/* int on_mp_write_session_request_read1(struct query_state *); */
static int on_mp_write_session_request_read2(struct query_state *);
static int on_mp_write_session_request_process(struct query_state *);
static int on_mp_write_session_response_write1(struct query_state *);
static int on_mp_write_session_write_request_read1(struct query_state *);
static int on_mp_write_session_write_request_read2(struct query_state *);
static int on_mp_write_session_write_request_process(struct query_state *);
static int on_mp_write_session_write_response_write1(struct query_state *);
/*
 * This function is used as the query_state's destroy_func to perform proper
 * cleanup in case of errors.
 */
static void
on_mp_write_session_destroy(struct query_state *qstate)
{
TRACE_IN(on_mp_write_session_destroy);
finalize_comm_element(&qstate->request);
finalize_comm_element(&qstate->response);
if (qstate->mdata != NULL) {
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
abandon_cache_mp_write_session(
(cache_mp_write_session)qstate->mdata);
configuration_unlock_entry(qstate->config_entry,
CELT_MULTIPART);
}
TRACE_OUT(on_mp_write_session_destroy);
}
/*
* The functions below are used to process multipart write session initiation
* requests.
* - on_mp_write_session_request_read1 and on_mp_write_session_request_read2
* read the request itself
* - on_mp_write_session_request_process processes it
* - on_mp_write_session_response_write1 sends the response
*/
int
on_mp_write_session_request_read1(struct query_state *qstate)
{
struct cache_mp_write_session_request *c_mp_ws_request;
ssize_t result;
TRACE_IN(on_mp_write_session_request_read1);
if (qstate->kevent_watermark == 0)
qstate->kevent_watermark = sizeof(size_t);
else {
init_comm_element(&qstate->request,
CET_MP_WRITE_SESSION_REQUEST);
c_mp_ws_request = get_cache_mp_write_session_request(
&qstate->request);
result = qstate->read_func(qstate,
&c_mp_ws_request->entry_length, sizeof(size_t));
if (result != sizeof(size_t)) {
LOG_ERR_3("on_mp_write_session_request_read1",
"read failed");
TRACE_OUT(on_mp_write_session_request_read1);
return (-1);
}
if (BUFSIZE_INVALID(c_mp_ws_request->entry_length)) {
LOG_ERR_3("on_mp_write_session_request_read1",
"invalid entry_length value");
TRACE_OUT(on_mp_write_session_request_read1);
return (-1);
}
c_mp_ws_request->entry = (char *)malloc(
c_mp_ws_request->entry_length + 1);
assert(c_mp_ws_request->entry != NULL);
memset(c_mp_ws_request->entry, 0,
c_mp_ws_request->entry_length + 1);
qstate->kevent_watermark = c_mp_ws_request->entry_length;
qstate->process_func = on_mp_write_session_request_read2;
}
TRACE_OUT(on_mp_write_session_request_read1);
return (0);
}
static int
on_mp_write_session_request_read2(struct query_state *qstate)
{
struct cache_mp_write_session_request *c_mp_ws_request;
ssize_t result;
TRACE_IN(on_mp_write_session_request_read2);
c_mp_ws_request = get_cache_mp_write_session_request(&qstate->request);
result = qstate->read_func(qstate, c_mp_ws_request->entry,
c_mp_ws_request->entry_length);
if (result != qstate->kevent_watermark) {
LOG_ERR_3("on_mp_write_session_request_read2",
"read failed");
TRACE_OUT(on_mp_write_session_request_read2);
return (-1);
}
qstate->kevent_watermark = 0;
qstate->process_func = on_mp_write_session_request_process;
TRACE_OUT(on_mp_write_session_request_read2);
return (0);
}
static int
on_mp_write_session_request_process(struct query_state *qstate)
{
struct cache_mp_write_session_request *c_mp_ws_request;
struct cache_mp_write_session_response *c_mp_ws_response;
cache_mp_write_session ws;
cache_entry c_entry;
char *dec_cache_entry_name;
TRACE_IN(on_mp_write_session_request_process);
init_comm_element(&qstate->response, CET_MP_WRITE_SESSION_RESPONSE);
c_mp_ws_response = get_cache_mp_write_session_response(
&qstate->response);
c_mp_ws_request = get_cache_mp_write_session_request(&qstate->request);
qstate->config_entry = configuration_find_entry(
s_configuration, c_mp_ws_request->entry);
if (qstate->config_entry == NULL) {
c_mp_ws_response->error_code = ENOENT;
LOG_ERR_2("write_session_request",
"can't find configuration entry '%s'. "
"aborting request", c_mp_ws_request->entry);
goto fin;
}
if (qstate->config_entry->enabled == 0) {
c_mp_ws_response->error_code = EACCES;
LOG_ERR_2("write_session_request",
"configuration entry '%s' is disabled",
c_mp_ws_request->entry);
goto fin;
}
if (qstate->config_entry->perform_actual_lookups != 0) {
c_mp_ws_response->error_code = EOPNOTSUPP;
LOG_ERR_2("write_session_request",
"entry '%s' performs lookups by itself: "
"can't write to it", c_mp_ws_request->entry);
goto fin;
} else {
#ifdef NS_CACHED_EID_CHECKING
if (check_query_eids(qstate) != 0) {
c_mp_ws_response->error_code = EPERM;
goto fin;
}
#endif
}
	/*
	 * All multipart entries are distinguished by their name decorations.
	 * One configuration entry can have many multipart cache entries,
	 * each with its own decorated name.
	 */
asprintf(&dec_cache_entry_name, "%s%s", qstate->eid_str,
qstate->config_entry->mp_cache_params.entry_name);
assert(dec_cache_entry_name != NULL);
configuration_lock_rdlock(s_configuration);
c_entry = find_cache_entry(s_cache,
dec_cache_entry_name);
configuration_unlock(s_configuration);
if (c_entry == INVALID_CACHE_ENTRY)
c_entry = register_new_mp_cache_entry(qstate,
dec_cache_entry_name);
free(dec_cache_entry_name);
assert(c_entry != NULL);
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
ws = open_cache_mp_write_session(c_entry);
if (ws == INVALID_CACHE_MP_WRITE_SESSION)
c_mp_ws_response->error_code = -1;
else {
qstate->mdata = ws;
qstate->destroy_func = on_mp_write_session_destroy;
if ((qstate->config_entry->mp_query_timeout.tv_sec != 0) ||
(qstate->config_entry->mp_query_timeout.tv_usec != 0))
memcpy(&qstate->timeout,
&qstate->config_entry->mp_query_timeout,
sizeof(struct timeval));
}
configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
fin:
qstate->process_func = on_mp_write_session_response_write1;
qstate->kevent_watermark = sizeof(int);
qstate->kevent_filter = EVFILT_WRITE;
TRACE_OUT(on_mp_write_session_request_process);
return (0);
}
static int
on_mp_write_session_response_write1(struct query_state *qstate)
{
struct cache_mp_write_session_response *c_mp_ws_response;
ssize_t result;
TRACE_IN(on_mp_write_session_response_write1);
c_mp_ws_response = get_cache_mp_write_session_response(
&qstate->response);
result = qstate->write_func(qstate, &c_mp_ws_response->error_code,
sizeof(int));
if (result != sizeof(int)) {
LOG_ERR_3("on_mp_write_session_response_write1",
"write failed");
TRACE_OUT(on_mp_write_session_response_write1);
return (-1);
}
if (c_mp_ws_response->error_code == 0) {
qstate->kevent_watermark = sizeof(int);
qstate->process_func = on_mp_write_session_mapper;
qstate->kevent_filter = EVFILT_READ;
} else {
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
}
TRACE_OUT(on_mp_write_session_response_write1);
return (0);
}
/*
 * The mapper function is used to avoid opening a new connection for each
 * session read or write request.  After processing a request, it does not
 * close the connection but waits for the next request.
 */
static int
on_mp_write_session_mapper(struct query_state *qstate)
{
ssize_t result;
int elem_type;
TRACE_IN(on_mp_write_session_mapper);
if (qstate->kevent_watermark == 0) {
qstate->kevent_watermark = sizeof(int);
} else {
result = qstate->read_func(qstate, &elem_type, sizeof(int));
if (result != sizeof(int)) {
LOG_ERR_3("on_mp_write_session_mapper",
"read failed");
TRACE_OUT(on_mp_write_session_mapper);
return (-1);
}
switch (elem_type) {
case CET_MP_WRITE_SESSION_WRITE_REQUEST:
qstate->kevent_watermark = sizeof(size_t);
qstate->process_func =
on_mp_write_session_write_request_read1;
break;
case CET_MP_WRITE_SESSION_ABANDON_NOTIFICATION:
qstate->kevent_watermark = 0;
qstate->process_func =
on_mp_write_session_abandon_notification;
break;
case CET_MP_WRITE_SESSION_CLOSE_NOTIFICATION:
qstate->kevent_watermark = 0;
qstate->process_func =
on_mp_write_session_close_notification;
break;
default:
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
LOG_ERR_2("on_mp_write_session_mapper",
"unknown element type");
TRACE_OUT(on_mp_write_session_mapper);
return (-1);
}
}
TRACE_OUT(on_mp_write_session_mapper);
return (0);
}
/*
 * The functions below are used to process multipart write session write
 * requests:
* - on_mp_write_session_write_request_read1 and
* on_mp_write_session_write_request_read2 read the request itself
* - on_mp_write_session_write_request_process processes it
* - on_mp_write_session_write_response_write1 sends the response
*/
static int
on_mp_write_session_write_request_read1(struct query_state *qstate)
{
struct cache_mp_write_session_write_request *write_request;
ssize_t result;
TRACE_IN(on_mp_write_session_write_request_read1);
init_comm_element(&qstate->request,
CET_MP_WRITE_SESSION_WRITE_REQUEST);
write_request = get_cache_mp_write_session_write_request(
&qstate->request);
result = qstate->read_func(qstate, &write_request->data_size,
sizeof(size_t));
if (result != sizeof(size_t)) {
LOG_ERR_3("on_mp_write_session_write_request_read1",
"read failed");
TRACE_OUT(on_mp_write_session_write_request_read1);
return (-1);
}
if (BUFSIZE_INVALID(write_request->data_size)) {
LOG_ERR_3("on_mp_write_session_write_request_read1",
"invalid data_size value");
TRACE_OUT(on_mp_write_session_write_request_read1);
return (-1);
}
write_request->data = (char *)malloc(write_request->data_size);
assert(write_request->data != NULL);
memset(write_request->data, 0, write_request->data_size);
qstate->kevent_watermark = write_request->data_size;
qstate->process_func = on_mp_write_session_write_request_read2;
TRACE_OUT(on_mp_write_session_write_request_read1);
return (0);
}
static int
on_mp_write_session_write_request_read2(struct query_state *qstate)
{
struct cache_mp_write_session_write_request *write_request;
ssize_t result;
TRACE_IN(on_mp_write_session_write_request_read2);
write_request = get_cache_mp_write_session_write_request(
&qstate->request);
result = qstate->read_func(qstate, write_request->data,
write_request->data_size);
if (result != qstate->kevent_watermark) {
LOG_ERR_3("on_mp_write_session_write_request_read2",
"read failed");
TRACE_OUT(on_mp_write_session_write_request_read2);
return (-1);
}
qstate->kevent_watermark = 0;
qstate->process_func = on_mp_write_session_write_request_process;
TRACE_OUT(on_mp_write_session_write_request_read2);
return (0);
}
static int
on_mp_write_session_write_request_process(struct query_state *qstate)
{
struct cache_mp_write_session_write_request *write_request;
struct cache_mp_write_session_write_response *write_response;
TRACE_IN(on_mp_write_session_write_request_process);
init_comm_element(&qstate->response,
CET_MP_WRITE_SESSION_WRITE_RESPONSE);
write_response = get_cache_mp_write_session_write_response(
&qstate->response);
write_request = get_cache_mp_write_session_write_request(
&qstate->request);
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
write_response->error_code = cache_mp_write(
(cache_mp_write_session)qstate->mdata,
write_request->data,
write_request->data_size);
configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
qstate->kevent_watermark = sizeof(int);
qstate->process_func = on_mp_write_session_write_response_write1;
qstate->kevent_filter = EVFILT_WRITE;
TRACE_OUT(on_mp_write_session_write_request_process);
return (0);
}
static int
on_mp_write_session_write_response_write1(struct query_state *qstate)
{
struct cache_mp_write_session_write_response *write_response;
ssize_t result;
TRACE_IN(on_mp_write_session_write_response_write1);
write_response = get_cache_mp_write_session_write_response(
&qstate->response);
result = qstate->write_func(qstate, &write_response->error_code,
sizeof(int));
if (result != sizeof(int)) {
LOG_ERR_3("on_mp_write_session_write_response_write1",
"write failed");
TRACE_OUT(on_mp_write_session_write_response_write1);
return (-1);
}
if (write_response->error_code == 0) {
finalize_comm_element(&qstate->request);
finalize_comm_element(&qstate->response);
qstate->kevent_watermark = sizeof(int);
qstate->process_func = on_mp_write_session_mapper;
qstate->kevent_filter = EVFILT_READ;
} else {
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
}
TRACE_OUT(on_mp_write_session_write_response_write1);
return (0);
}
/*
 * Handles abandon notifications. Destroys the session without committing it
 * by calling abandon_cache_mp_write_session().
 */
static int
on_mp_write_session_abandon_notification(struct query_state *qstate)
{
TRACE_IN(on_mp_write_session_abandon_notification);
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
abandon_cache_mp_write_session((cache_mp_write_session)qstate->mdata);
configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
qstate->mdata = INVALID_CACHE_MP_WRITE_SESSION;
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
TRACE_OUT(on_mp_write_session_abandon_notification);
return (0);
}
/*
 * Handles close notifications. Commits the session by calling
 * close_cache_mp_write_session().
 */
static int
on_mp_write_session_close_notification(struct query_state *qstate)
{
TRACE_IN(on_mp_write_session_close_notification);
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
close_cache_mp_write_session((cache_mp_write_session)qstate->mdata);
configuration_unlock_entry(qstate->config_entry, CELT_MULTIPART);
qstate->mdata = INVALID_CACHE_MP_WRITE_SESSION;
qstate->kevent_watermark = 0;
qstate->process_func = NULL;
TRACE_OUT(on_mp_write_session_close_notification);
return (0);
}
cache_entry register_new_mp_cache_entry(struct query_state *qstate,
const char *dec_cache_entry_name)
{
cache_entry c_entry;
char *en_bkp;
TRACE_IN(register_new_mp_cache_entry);
c_entry = INVALID_CACHE_ENTRY;
configuration_lock_entry(qstate->config_entry, CELT_MULTIPART);
configuration_lock_wrlock(s_configuration);
en_bkp = qstate->config_entry->mp_cache_params.entry_name;
qstate->config_entry->mp_cache_params.entry_name =
(char *)dec_cache_entry_name;
register_cache_entry(s_cache, (struct cache_entry_params *)
&qstate->config_entry->mp_cache_params);
qstate->config_entry->mp_cache_params.entry_name = en_bkp;
configuration_unlock(s_configuration);
configuration_lock_rdlock(s_configuration);
c_entry = find_cache_entry(s_cache,
dec_cache_entry_name);
configuration_unlock(s_configuration);
configuration_entry_add_mp_cache_entry(qstate->config_entry,
c_entry);
configuration_unlock_entry(qstate->config_entry,
CELT_MULTIPART);
TRACE_OUT(register_new_mp_cache_entry);
return (c_entry);
}
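Taken together, the handlers above implement one side of a simple framed protocol over the daemon's unix socket: an int element type, followed by a type-specific payload, answered by an int error code. The sketch below shows a possible client-side view of a complete multipart write session; it is illustrative only (the function name and the framing of the initial session request are assumptions based on the request/response structures in protocol.h), it ignores short reads and writes, and it does no error handling beyond the protocol's error codes.

#include <string.h>
#include <unistd.h>

#include "protocol.h"	/* CET_* element type constants */

static int
mp_write_session_sketch(int fd, const char *entry, const char *data,
    size_t data_size)
{
	int elem_type, error_code;
	size_t entry_length;

	/* open the session: element type, entry name length, entry name */
	elem_type = CET_MP_WRITE_SESSION_REQUEST;
	entry_length = strlen(entry);
	write(fd, &elem_type, sizeof(int));
	write(fd, &entry_length, sizeof(size_t));
	write(fd, entry, entry_length);
	read(fd, &error_code, sizeof(int));
	if (error_code != 0)
		return (-1);

	/* one write request: element type, data size, then the data itself */
	elem_type = CET_MP_WRITE_SESSION_WRITE_REQUEST;
	write(fd, &elem_type, sizeof(int));
	write(fd, &data_size, sizeof(size_t));
	write(fd, data, data_size);
	read(fd, &error_code, sizeof(int));
	if (error_code != 0)
		return (-1);

	/* commit the session; an abandon notification would discard it */
	elem_type = CET_MP_WRITE_SESSION_CLOSE_NOTIFICATION;
	write(fd, &elem_type, sizeof(int));
	return (0);
}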

View File

@ -1,36 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_MP_WS_QUERY_H__
#define __CACHED_MP_WS_QUERY_H__
extern int on_mp_write_session_request_read1(struct query_state *);
extern cache_entry register_new_mp_cache_entry(struct query_state *,
const char *);
#endif

View File

@ -1,474 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include "config.h"
#include "debug.h"
#include "log.h"
#include "parser.h"
static void enable_cache(struct configuration *,const char *, int);
static struct configuration_entry *find_create_entry(struct configuration *,
const char *);
static int get_number(const char *, int, int);
static enum cache_policy_t get_policy(const char *);
static int get_yesno(const char *);
static int check_cachename(const char *);
static void check_files(struct configuration *, const char *, int);
static void set_keep_hot_count(struct configuration *, const char *, int);
static void set_negative_policy(struct configuration *, const char *,
enum cache_policy_t);
static void set_negative_time_to_live(struct configuration *,
const char *, int);
static void set_positive_policy(struct configuration *, const char *,
enum cache_policy_t);
static void set_perform_actual_lookups(struct configuration *, const char *,
int);
static void set_positive_time_to_live(struct configuration *,
const char *, int);
static void set_suggested_size(struct configuration *, const char *,
int size);
static void set_threads_num(struct configuration *, int);
static int strbreak(char *, char **, int);
static int
strbreak(char *str, char **fields, int fields_size)
{
char *c = str;
int i, num;
TRACE_IN(strbreak);
num = 0;
for (i = 0;
((*fields =
strsep(i < fields_size ? &c : NULL, "\n\t ")) != NULL);
++i)
if ((*(*fields)) != '\0') {
++fields;
++num;
}
TRACE_OUT(strbreak);
return (num);
}
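/*
 * Illustrative example: for the configuration line
 * "enable-cache passwd yes", strbreak() splits the buffer in place and
 * fills fields[0] = "enable-cache", fields[1] = "passwd",
 * fields[2] = "yes", returning 3; empty tokens produced by runs of
 * whitespace are skipped.
 */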
/*
 * Tries to find the configuration entry with the specified name. If the
 * search fails, a new entry with default parameters is created.
*/
static struct configuration_entry *
find_create_entry(struct configuration *config,
const char *entry_name)
{
struct configuration_entry *entry = NULL;
int res;
TRACE_IN(find_create_entry);
entry = configuration_find_entry(config, entry_name);
if (entry == NULL) {
entry = create_def_configuration_entry(entry_name);
assert(entry != NULL);
res = add_configuration_entry(config, entry);
assert(res == 0);
}
TRACE_OUT(find_create_entry);
return (entry);
}
/*
 * Most of the functions below correspond directly to particular keywords
 * in the configuration file.
*/
static void
enable_cache(struct configuration *config, const char *entry_name, int flag)
{
struct configuration_entry *entry;
TRACE_IN(enable_cache);
entry = find_create_entry(config, entry_name);
entry->enabled = flag;
TRACE_OUT(enable_cache);
}
static void
set_positive_time_to_live(struct configuration *config,
const char *entry_name, int ttl)
{
struct configuration_entry *entry;
struct timeval lifetime;
TRACE_IN(set_positive_time_to_live);
assert(ttl >= 0);
assert(entry_name != NULL);
memset(&lifetime, 0, sizeof(struct timeval));
lifetime.tv_sec = ttl;
entry = find_create_entry(config, entry_name);
memcpy(&entry->positive_cache_params.max_lifetime,
&lifetime, sizeof(struct timeval));
memcpy(&entry->mp_cache_params.max_lifetime,
&lifetime, sizeof(struct timeval));
TRACE_OUT(set_positive_time_to_live);
}
static void
set_negative_time_to_live(struct configuration *config,
const char *entry_name, int nttl)
{
struct configuration_entry *entry;
struct timeval lifetime;
TRACE_IN(set_negative_time_to_live);
assert(nttl > 0);
assert(entry_name != NULL);
memset(&lifetime, 0, sizeof(struct timeval));
lifetime.tv_sec = nttl;
entry = find_create_entry(config, entry_name);
assert(entry != NULL);
memcpy(&entry->negative_cache_params.max_lifetime,
&lifetime, sizeof(struct timeval));
TRACE_OUT(set_negative_time_to_live);
}
/*
 * The hot count is actually the limit on the number of cache elements.
*/
static void
set_keep_hot_count(struct configuration *config,
const char *entry_name, int count)
{
struct configuration_entry *entry;
TRACE_IN(set_keep_hot_count);
assert(count >= 0);
assert(entry_name != NULL);
entry = find_create_entry(config, entry_name);
assert(entry != NULL);
entry->positive_cache_params.max_elemsize = count;
entry = find_create_entry(config, entry_name);
assert(entry != NULL);
entry->negative_cache_params.max_elemsize = count;
TRACE_OUT(set_keep_hot_count);
}
static void
set_positive_policy(struct configuration *config,
const char *entry_name, enum cache_policy_t policy)
{
struct configuration_entry *entry;
TRACE_IN(set_positive_policy);
assert(entry_name != NULL);
entry = find_create_entry(config, entry_name);
assert(entry != NULL);
entry->positive_cache_params.policy = policy;
TRACE_OUT(set_positive_policy);
}
static void
set_negative_policy(struct configuration *config,
const char *entry_name, enum cache_policy_t policy)
{
struct configuration_entry *entry;
TRACE_IN(set_negative_policy);
assert(entry_name != NULL);
entry = find_create_entry(config, entry_name);
assert(entry != NULL);
entry->negative_cache_params.policy = policy;
TRACE_OUT(set_negative_policy);
}
static void
set_perform_actual_lookups(struct configuration *config,
const char *entry_name, int flag)
{
struct configuration_entry *entry;
TRACE_IN(set_perform_actual_lookups);
assert(entry_name != NULL);
entry = find_create_entry(config, entry_name);
assert(entry != NULL);
entry->perform_actual_lookups = flag;
TRACE_OUT(set_perform_actual_lookups);
}
static void
set_suggested_size(struct configuration *config,
const char *entry_name, int size)
{
struct configuration_entry *entry;
TRACE_IN(set_suggested_size);
assert(config != NULL);
assert(entry_name != NULL);
assert(size > 0);
entry = find_create_entry(config, entry_name);
assert(entry != NULL);
entry->positive_cache_params.cache_entries_size = size;
entry->negative_cache_params.cache_entries_size = size;
TRACE_OUT(set_suggested_size);
}
static void
check_files(struct configuration *config, const char *entry_name, int flag)
{
TRACE_IN(check_files);
assert(entry_name != NULL);
TRACE_OUT(check_files);
}
static int
get_yesno(const char *str)
{
if (strcmp(str, "yes") == 0)
return (1);
else if (strcmp(str, "no") == 0)
return (0);
else
return (-1);
}
static int
get_number(const char *str, int low, int max)
{
char *end = NULL;
int res = 0;
if (str[0] == '\0')
return (-1);
res = strtol(str, &end, 10);
if (*end != '\0')
return (-1);
else
if (((res >= low) || (low == -1)) &&
((res <= max) || (max == -1)))
return (res);
else
return (-2);
}
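/*
 * Illustrative examples: get_number("600", 0, -1) returns 600 (no upper
 * bound when max is -1), get_number("abc", 0, -1) returns -1 (not a
 * number), and get_number("-5", 0, -1) returns -2 (out of range).
 */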
static enum cache_policy_t
get_policy(const char *str)
{
if (strcmp(str, "fifo") == 0)
return (CPT_FIFO);
else if (strcmp(str, "lru") == 0)
return (CPT_LRU);
else if (strcmp(str, "lfu") == 0)
return (CPT_LFU);
return (-1);
}
static int
check_cachename(const char *str)
{
assert(str != NULL);
return ((strlen(str) > 0) ? 0 : -1);
}
static void
set_threads_num(struct configuration *config, int value)
{
assert(config != NULL);
config->threads_num = value;
}
/*
 * The main configuration routine. Its implementation is heavily inspired by
 * the implementation of the same routine in Solaris NSCD.
*/
int
parse_config_file(struct configuration *config,
const char *fname, char const **error_str, int *error_line)
{
FILE *fin;
char buffer[255];
char *fields[128];
int field_count, line_num, value;
int res;
TRACE_IN(parse_config_file);
assert(config != NULL);
assert(fname != NULL);
fin = fopen(fname, "r");
if (fin == NULL) {
TRACE_OUT(parse_config_file);
return (-1);
}
res = 0;
line_num = 0;
memset(buffer, 0, sizeof(buffer));
while ((res == 0) && (fgets(buffer, sizeof(buffer) - 1, fin) != NULL)) {
field_count = strbreak(buffer, fields, sizeof(fields));
++line_num;
if (field_count == 0)
continue;
switch (fields[0][0]) {
case '#':
case '\0':
continue;
case 'e':
if ((field_count == 3) &&
(strcmp(fields[0], "enable-cache") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_yesno(fields[2])) != -1)) {
enable_cache(config, fields[1], value);
continue;
}
break;
case 'd':
if ((field_count == 2) &&
(strcmp(fields[0], "debug-level") == 0) &&
((value = get_number(fields[1], 0, 10)) != -1)) {
continue;
}
break;
case 'p':
if ((field_count == 3) &&
(strcmp(fields[0], "positive-time-to-live") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_number(fields[2], 0, -1)) != -1)) {
set_positive_time_to_live(config,
fields[1], value);
continue;
} else if ((field_count == 3) &&
(strcmp(fields[0], "positive-policy") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_policy(fields[2])) != -1)) {
set_positive_policy(config, fields[1], value);
continue;
} else if ((field_count == 3) &&
(strcmp(fields[0], "perform-actual-lookups") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_yesno(fields[2])) != -1)) {
set_perform_actual_lookups(config, fields[1],
value);
continue;
}
break;
case 'n':
if ((field_count == 3) &&
(strcmp(fields[0], "negative-time-to-live") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_number(fields[2], 0, -1)) != -1)) {
set_negative_time_to_live(config,
fields[1], value);
continue;
} else if ((field_count == 3) &&
(strcmp(fields[0], "negative-policy") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_policy(fields[2])) != -1)) {
set_negative_policy(config,
fields[1], value);
continue;
}
break;
case 's':
if ((field_count == 3) &&
(strcmp(fields[0], "suggested-size") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_number(fields[2], 1, -1)) != -1)) {
set_suggested_size(config, fields[1], value);
continue;
}
break;
case 't':
if ((field_count == 2) &&
(strcmp(fields[0], "threads") == 0) &&
((value = get_number(fields[1], 1, -1)) != -1)) {
set_threads_num(config, value);
continue;
}
break;
case 'k':
if ((field_count == 3) &&
(strcmp(fields[0], "keep-hot-count") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_number(fields[2], 0, -1)) != -1)) {
set_keep_hot_count(config,
fields[1], value);
continue;
}
break;
case 'c':
if ((field_count == 3) &&
(strcmp(fields[0], "check-files") == 0) &&
(check_cachename(fields[1]) == 0) &&
((value = get_yesno(fields[2])) != -1)) {
check_files(config,
fields[1], value);
continue;
}
break;
default:
break;
}
LOG_ERR_2("config file parser", "error in file "
"%s on line %d", fname, line_num);
*error_str = "syntax error";
*error_line = line_num;
res = -1;
}
fclose(fin);
TRACE_OUT(parse_config_file);
return (res);
}
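For reference, a configuration file accepted by the parser above could look like the fragment below (an illustrative sketch; the entry names and values are examples, see cached.conf(5) for the authoritative description):

# example cached.conf fragment
enable-cache passwd yes
positive-time-to-live passwd 600
negative-time-to-live passwd 60
positive-policy passwd lru
negative-policy passwd fifo
keep-hot-count passwd 2048
suggested-size passwd 1024
perform-actual-lookups hosts yes
check-files hosts yes
threads 8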

View File

@ -1,35 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_PARSER_H__
#define __CACHED_PARSER_H__
extern int parse_config_file(struct configuration *,
const char *, char const **, int *);
#endif

View File

@ -1,550 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <assert.h>
#include <stdlib.h>
#include <string.h>
#include "debug.h"
#include "log.h"
#include "protocol.h"
/*
* Initializes the comm_element with any given type of data
*/
void
init_comm_element(struct comm_element *element, enum comm_element_t type)
{
TRACE_IN(init_comm_element);
memset(element, 0, sizeof(struct comm_element));
switch (type) {
case CET_WRITE_REQUEST:
init_cache_write_request(&element->c_write_request);
break;
case CET_WRITE_RESPONSE:
init_cache_write_response(&element->c_write_response);
break;
case CET_READ_REQUEST:
init_cache_read_request(&element->c_read_request);
break;
case CET_READ_RESPONSE:
init_cache_read_response(&element->c_read_response);
break;
case CET_TRANSFORM_REQUEST:
init_cache_transform_request(&element->c_transform_request);
break;
case CET_TRANSFORM_RESPONSE:
init_cache_transform_response(&element->c_transform_response);
break;
case CET_MP_WRITE_SESSION_REQUEST:
init_cache_mp_write_session_request(&element->c_mp_ws_request);
break;
case CET_MP_WRITE_SESSION_RESPONSE:
init_cache_mp_write_session_response(&element->c_mp_ws_response);
break;
case CET_MP_WRITE_SESSION_WRITE_REQUEST:
init_cache_mp_write_session_write_request(
&element->c_mp_ws_write_request);
break;
case CET_MP_WRITE_SESSION_WRITE_RESPONSE:
init_cache_mp_write_session_write_response(
&element->c_mp_ws_write_response);
break;
case CET_MP_READ_SESSION_REQUEST:
init_cache_mp_read_session_request(&element->c_mp_rs_request);
break;
case CET_MP_READ_SESSION_RESPONSE:
init_cache_mp_read_session_response(&element->c_mp_rs_response);
break;
case CET_MP_READ_SESSION_READ_RESPONSE:
init_cache_mp_read_session_read_response(
&element->c_mp_rs_read_response);
break;
case CET_UNDEFINED:
break;
default:
LOG_ERR_2("init_comm_element", "invalid communication element");
TRACE_OUT(init_comm_element);
return;
}
element->type = type;
TRACE_OUT(init_comm_element);
}
void
finalize_comm_element(struct comm_element *element)
{
TRACE_IN(finalize_comm_element);
switch (element->type) {
case CET_WRITE_REQUEST:
finalize_cache_write_request(&element->c_write_request);
break;
case CET_WRITE_RESPONSE:
finalize_cache_write_response(&element->c_write_response);
break;
case CET_READ_REQUEST:
finalize_cache_read_request(&element->c_read_request);
break;
case CET_READ_RESPONSE:
finalize_cache_read_response(&element->c_read_response);
break;
case CET_TRANSFORM_REQUEST:
finalize_cache_transform_request(&element->c_transform_request);
break;
case CET_TRANSFORM_RESPONSE:
finalize_cache_transform_response(
&element->c_transform_response);
break;
case CET_MP_WRITE_SESSION_REQUEST:
finalize_cache_mp_write_session_request(
&element->c_mp_ws_request);
break;
case CET_MP_WRITE_SESSION_RESPONSE:
finalize_cache_mp_write_session_response(
&element->c_mp_ws_response);
break;
case CET_MP_WRITE_SESSION_WRITE_REQUEST:
finalize_cache_mp_write_session_write_request(
&element->c_mp_ws_write_request);
break;
case CET_MP_WRITE_SESSION_WRITE_RESPONSE:
finalize_cache_mp_write_session_write_response(
&element->c_mp_ws_write_response);
break;
case CET_MP_READ_SESSION_REQUEST:
finalize_cache_mp_read_session_request(
&element->c_mp_rs_request);
break;
case CET_MP_READ_SESSION_RESPONSE:
finalize_cache_mp_read_session_response(
&element->c_mp_rs_response);
break;
case CET_MP_READ_SESSION_READ_RESPONSE:
finalize_cache_mp_read_session_read_response(
&element->c_mp_rs_read_response);
break;
case CET_UNDEFINED:
break;
default:
break;
}
element->type = CET_UNDEFINED;
TRACE_OUT(finalize_comm_element);
}
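/*
 * Illustrative usage of the init/get/finalize triple (hypothetical helper,
 * for exposition only): a comm_element is initialized for a concrete type,
 * the typed view is obtained with the matching get_*() accessor, and
 * finalize_comm_element() releases whatever that type allocated.
 */
static void
comm_element_usage_sketch(void)
{
	struct comm_element element;
	struct cache_write_request *write_request;

	init_comm_element(&element, CET_WRITE_REQUEST);
	write_request = get_cache_write_request(&element);
	write_request->entry = strdup("passwd");
	write_request->entry_length = strlen(write_request->entry);
	/* cache_key and data would be filled the same way */
	finalize_comm_element(&element);	/* frees entry, cache_key, data */
}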
void
init_cache_write_request(struct cache_write_request *write_request)
{
TRACE_IN(init_cache_write_request);
memset(write_request, 0, sizeof(struct cache_write_request));
TRACE_OUT(init_cache_write_request);
}
void
finalize_cache_write_request(struct cache_write_request *write_request)
{
TRACE_IN(finalize_cache_write_request);
free(write_request->entry);
free(write_request->cache_key);
free(write_request->data);
TRACE_OUT(finalize_cache_write_request);
}
struct cache_write_request *
get_cache_write_request(struct comm_element *element)
{
TRACE_IN(get_cache_write_request);
assert(element->type == CET_WRITE_REQUEST);
TRACE_OUT(get_cache_write_request);
return (&element->c_write_request);
}
void
init_cache_write_response(struct cache_write_response *write_response)
{
TRACE_IN(init_cache_write_response);
memset(write_response, 0, sizeof(struct cache_write_response));
TRACE_OUT(init_cache_write_response);
}
void
finalize_cache_write_response(struct cache_write_response *write_response)
{
TRACE_IN(finalize_cache_write_response);
TRACE_OUT(finalize_cache_write_response);
}
struct cache_write_response *
get_cache_write_response(struct comm_element *element)
{
TRACE_IN(get_cache_write_response);
assert(element->type == CET_WRITE_RESPONSE);
TRACE_OUT(get_cache_write_response);
return (&element->c_write_response);
}
void
init_cache_read_request(struct cache_read_request *read_request)
{
TRACE_IN(init_cache_read_request);
memset(read_request, 0, sizeof(struct cache_read_request));
TRACE_OUT(init_cache_read_request);
}
void
finalize_cache_read_request(struct cache_read_request *read_request)
{
TRACE_IN(finalize_cache_read_request);
free(read_request->entry);
free(read_request->cache_key);
TRACE_OUT(finalize_cache_read_request);
}
struct cache_read_request *
get_cache_read_request(struct comm_element *element)
{
TRACE_IN(get_cache_read_request);
assert(element->type == CET_READ_REQUEST);
TRACE_OUT(get_cache_read_request);
return (&element->c_read_request);
}
void
init_cache_read_response(struct cache_read_response *read_response)
{
TRACE_IN(init_cache_read_response);
memset(read_response, 0, sizeof(struct cache_read_response));
TRACE_OUT(init_cache_read_response);
}
void
finalize_cache_read_response(struct cache_read_response *read_response)
{
TRACE_IN(finalize_cache_read_response);
free(read_response->data);
TRACE_OUT(finalize_cache_read_response);
}
struct cache_read_response *
get_cache_read_response(struct comm_element *element)
{
TRACE_IN(get_cache_read_response);
assert(element->type == CET_READ_RESPONSE);
TRACE_OUT(get_cache_read_response);
return (&element->c_read_response);
}
void
init_cache_transform_request(struct cache_transform_request *transform_request)
{
TRACE_IN(init_cache_transform_request);
memset(transform_request, 0, sizeof(struct cache_transform_request));
TRACE_OUT(init_cache_transform_request);
}
void
finalize_cache_transform_request(
struct cache_transform_request *transform_request)
{
TRACE_IN(finalize_cache_transform_request);
free(transform_request->entry);
TRACE_OUT(finalize_cache_transform_request);
}
struct cache_transform_request *
get_cache_transform_request(struct comm_element *element)
{
TRACE_IN(get_cache_transform_request);
assert(element->type == CET_TRANSFORM_REQUEST);
TRACE_OUT(get_cache_transform_request);
return (&element->c_transform_request);
}
void
init_cache_transform_response(
struct cache_transform_response *transform_response)
{
TRACE_IN(init_cache_transform_response);
memset(transform_response, 0, sizeof(struct cache_transform_response));
TRACE_OUT(init_cache_transform_response);
}
void
finalize_cache_transform_response(
struct cache_transform_response *transform_response)
{
TRACE_IN(finalize_cache_transform_response);
TRACE_OUT(finalize_cache_transform_response);
}
struct cache_transform_response *
get_cache_transform_response(struct comm_element *element)
{
TRACE_IN(get_cache_transform_response);
assert(element->type == CET_TRANSFORM_RESPONSE);
TRACE_OUT(get_cache_transform_response);
return (&element->c_transform_response);
}
void
init_cache_mp_write_session_request(
struct cache_mp_write_session_request *mp_ws_request)
{
TRACE_IN(init_cache_mp_write_session_request);
memset(mp_ws_request, 0,
sizeof(struct cache_mp_write_session_request));
TRACE_OUT(init_cache_mp_write_session_request);
}
void
finalize_cache_mp_write_session_request(
struct cache_mp_write_session_request *mp_ws_request)
{
TRACE_IN(finalize_cache_mp_write_session_request);
free(mp_ws_request->entry);
TRACE_OUT(finalize_cache_mp_write_session_request);
}
struct cache_mp_write_session_request *
get_cache_mp_write_session_request(struct comm_element *element)
{
TRACE_IN(get_cache_mp_write_session_request);
assert(element->type == CET_MP_WRITE_SESSION_REQUEST);
TRACE_OUT(get_cache_mp_write_session_request);
return (&element->c_mp_ws_request);
}
void
init_cache_mp_write_session_response(
struct cache_mp_write_session_response *mp_ws_response)
{
TRACE_IN(init_cache_mp_write_session_response);
memset(mp_ws_response, 0,
sizeof(struct cache_mp_write_session_response));
TRACE_OUT(init_cache_mp_write_session_response);
}
void
finalize_cache_mp_write_session_response(
struct cache_mp_write_session_response *mp_ws_response)
{
TRACE_IN(finalize_cache_mp_write_session_response);
TRACE_OUT(finalize_cache_mp_write_session_response);
}
struct cache_mp_write_session_response *
get_cache_mp_write_session_response(struct comm_element *element)
{
TRACE_IN(get_cache_mp_write_session_response);
assert(element->type == CET_MP_WRITE_SESSION_RESPONSE);
TRACE_OUT(get_cache_mp_write_session_response);
return (&element->c_mp_ws_response);
}
void
init_cache_mp_write_session_write_request(
struct cache_mp_write_session_write_request *mp_ws_write_request)
{
TRACE_IN(init_cache_mp_write_session_write_request);
memset(mp_ws_write_request, 0,
sizeof(struct cache_mp_write_session_write_request));
TRACE_OUT(init_cache_mp_write_session_write_request);
}
void
finalize_cache_mp_write_session_write_request(
struct cache_mp_write_session_write_request *mp_ws_write_request)
{
TRACE_IN(finalize_cache_mp_write_session_write_request);
free(mp_ws_write_request->data);
TRACE_OUT(finalize_cache_mp_write_session_write_request);
}
struct cache_mp_write_session_write_request *
get_cache_mp_write_session_write_request(struct comm_element *element)
{
TRACE_IN(get_cache_mp_write_session_write_request);
assert(element->type == CET_MP_WRITE_SESSION_WRITE_REQUEST);
TRACE_OUT(get_cache_mp_write_session_write_request);
return (&element->c_mp_ws_write_request);
}
void
init_cache_mp_write_session_write_response(
struct cache_mp_write_session_write_response *mp_ws_write_response)
{
TRACE_IN(init_cache_mp_write_session_write_response);
memset(mp_ws_write_response, 0,
sizeof(struct cache_mp_write_session_write_response));
TRACE_OUT(init_cache_mp_write_session_write_response);
}
void
finalize_cache_mp_write_session_write_response(
struct cache_mp_write_session_write_response *mp_ws_write_response)
{
TRACE_IN(finalize_cache_mp_write_session_write_response);
TRACE_OUT(finalize_cache_mp_write_session_write_response);
}
struct cache_mp_write_session_write_response *
get_cache_mp_write_session_write_response(struct comm_element *element)
{
TRACE_IN(get_cache_mp_write_session_write_response);
assert(element->type == CET_MP_WRITE_SESSION_WRITE_RESPONSE);
TRACE_OUT(get_cache_mp_write_session_write_response);
return (&element->c_mp_ws_write_response);
}
void
init_cache_mp_read_session_request(
struct cache_mp_read_session_request *mp_rs_request)
{
TRACE_IN(init_cache_mp_read_session_request);
memset(mp_rs_request, 0, sizeof(struct cache_mp_read_session_request));
TRACE_OUT(init_cache_mp_read_session_request);
}
void
finalize_cache_mp_read_session_request(
struct cache_mp_read_session_request *mp_rs_request)
{
TRACE_IN(finalize_cache_mp_read_session_request);
free(mp_rs_request->entry);
TRACE_OUT(finalize_cache_mp_read_session_request);
}
struct cache_mp_read_session_request *
get_cache_mp_read_session_request(struct comm_element *element)
{
TRACE_IN(get_cache_mp_read_session_request);
assert(element->type == CET_MP_READ_SESSION_REQUEST);
TRACE_OUT(get_cache_mp_read_session_request);
return (&element->c_mp_rs_request);
}
void
init_cache_mp_read_session_response(
struct cache_mp_read_session_response *mp_rs_response)
{
TRACE_IN(init_cache_mp_read_session_response);
memset(mp_rs_response, 0,
sizeof(struct cache_mp_read_session_response));
TRACE_OUT(init_cache_mp_read_session_response);
}
void
finalize_cache_mp_read_session_response(
struct cache_mp_read_session_response *mp_rs_response)
{
TRACE_IN(finalize_cache_mp_read_session_response);
TRACE_OUT(finalize_cache_mp_read_session_response);
}
struct cache_mp_read_session_response *
get_cache_mp_read_session_response(struct comm_element *element)
{
TRACE_IN(get_cache_mp_read_session_response);
assert(element->type == CET_MP_READ_SESSION_RESPONSE);
TRACE_OUT(get_cache_mp_read_session_response);
return (&element->c_mp_rs_response);
}
void
init_cache_mp_read_session_read_response(
struct cache_mp_read_session_read_response *mp_ws_read_response)
{
TRACE_IN(init_cache_mp_read_session_read_response);
memset(mp_ws_read_response, 0,
sizeof(struct cache_mp_read_session_read_response));
TRACE_OUT(init_cache_mp_read_session_read_response);
}
void
finalize_cache_mp_read_session_read_response(
struct cache_mp_read_session_read_response *mp_rs_read_response)
{
TRACE_IN(finalize_cache_mp_read_session_read_response);
free(mp_rs_read_response->data);
TRACE_OUT(finalize_cache_mp_read_session_read_response);
}
struct cache_mp_read_session_read_response *
get_cache_mp_read_session_read_response(struct comm_element *element)
{
TRACE_IN(get_cache_mp_read_session_read_response);
assert(element->type == CET_MP_READ_SESSION_READ_RESPONSE);
TRACE_OUT(get_cache_mp_read_session_read_response);
return (&element->c_mp_rs_read_response);
}

View File

@ -1,265 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_PROTOCOL_H__
#define __CACHED_PROTOCOL_H__
#include <stdlib.h>
/* maximum buffer size to receive - larger buffers are not allowed */
#define MAX_BUFFER_SIZE (1 << 20)
/* buffer size correctness checking routine */
#define BUFSIZE_CORRECT(x) (((x) > 0) && ((x) < MAX_BUFFER_SIZE))
#define BUFSIZE_INVALID(x) (!BUFSIZE_CORRECT(x))
/* structures below represent the data that are sent/received by the daemon */
struct cache_write_request
{
char *entry;
char *cache_key;
char *data;
size_t entry_length;
size_t cache_key_size;
size_t data_size;
};
struct cache_write_response
{
int error_code;
};
struct cache_read_request
{
char *entry;
char *cache_key;
size_t entry_length;
size_t cache_key_size;
};
struct cache_read_response
{
char *data; /* ignored if error_code is not 0 */
size_t data_size; /* ignored if error_code is not 0 */
int error_code;
};
enum transformation_type {
TT_USER = 0, /* transform only the entries of the caller */
TT_ALL = 1 /* transform all entries */
};
struct cache_transform_request
{
char *entry; /* ignored if entry_length is 0 */
size_t entry_length;
int transformation_type;
};
struct cache_transform_response
{
int error_code;
};
struct cache_mp_write_session_request {
char *entry;
size_t entry_length;
};
struct cache_mp_write_session_response {
int error_code;
};
struct cache_mp_write_session_write_request {
char *data;
size_t data_size;
};
struct cache_mp_write_session_write_response {
int error_code;
};
struct cache_mp_read_session_request {
char *entry;
size_t entry_length;
};
struct cache_mp_read_session_response {
int error_code;
};
struct cache_mp_read_session_read_response {
char *data;
size_t data_size;
int error_code;
};
enum comm_element_t {
CET_UNDEFINED = 0,
CET_WRITE_REQUEST = 1,
CET_WRITE_RESPONSE = 2,
CET_READ_REQUEST = 3,
CET_READ_RESPONSE = 4,
CET_TRANSFORM_REQUEST = 5,
CET_TRANSFORM_RESPONSE = 6,
CET_MP_WRITE_SESSION_REQUEST = 7,
CET_MP_WRITE_SESSION_RESPONSE = 8,
CET_MP_WRITE_SESSION_WRITE_REQUEST = 9,
CET_MP_WRITE_SESSION_WRITE_RESPONSE = 10,
CET_MP_WRITE_SESSION_CLOSE_NOTIFICATION = 11,
CET_MP_WRITE_SESSION_ABANDON_NOTIFICATION = 12,
CET_MP_READ_SESSION_REQUEST = 13,
CET_MP_READ_SESSION_RESPONSE = 14,
CET_MP_READ_SESSION_READ_REQUEST = 15,
CET_MP_READ_SESSION_READ_RESPONSE = 16,
CET_MP_READ_SESSION_CLOSE_NOTIFICATION = 17,
CET_MAX = 18
};
/*
 * The comm_element structure holds any of the known data types (defined
 * above) that can be sent or received.
*/
struct comm_element
{
union {
struct cache_write_request c_write_request;
struct cache_write_response c_write_response;
struct cache_read_request c_read_request;
struct cache_read_response c_read_response;
struct cache_transform_request c_transform_request;
struct cache_transform_response c_transform_response;
struct cache_mp_write_session_request c_mp_ws_request;
struct cache_mp_write_session_response c_mp_ws_response;
struct cache_mp_write_session_write_request c_mp_ws_write_request;
struct cache_mp_write_session_write_response c_mp_ws_write_response;
struct cache_mp_read_session_request c_mp_rs_request;
struct cache_mp_read_session_response c_mp_rs_response;
struct cache_mp_read_session_read_response c_mp_rs_read_response;
};
enum comm_element_t type;
};
extern void init_comm_element(struct comm_element *, enum comm_element_t type);
extern void finalize_comm_element(struct comm_element *);
/*
 * For each type of data there are three functions (init/finalize/get) that
 * are used with the comm_element structure.
*/
extern void init_cache_write_request(struct cache_write_request *);
extern void finalize_cache_write_request(struct cache_write_request *);
extern struct cache_write_request *get_cache_write_request(
struct comm_element *);
extern void init_cache_write_response(struct cache_write_response *);
extern void finalize_cache_write_response(struct cache_write_response *);
extern struct cache_write_response *get_cache_write_response(
struct comm_element *);
extern void init_cache_read_request(struct cache_read_request *);
extern void finalize_cache_read_request(struct cache_read_request *);
extern struct cache_read_request *get_cache_read_request(
struct comm_element *);
extern void init_cache_read_response(struct cache_read_response *);
extern void finalize_cache_read_response(struct cache_read_response *);
extern struct cache_read_response *get_cache_read_response(
struct comm_element *);
extern void init_cache_transform_request(struct cache_transform_request *);
extern void finalize_cache_transform_request(struct cache_transform_request *);
extern struct cache_transform_request *get_cache_transform_request(
struct comm_element *);
extern void init_cache_transform_response(struct cache_transform_response *);
extern void finalize_cache_transform_response(
struct cache_transform_response *);
extern struct cache_transform_response *get_cache_transform_response(
struct comm_element *);
extern void init_cache_mp_write_session_request(
struct cache_mp_write_session_request *);
extern void finalize_cache_mp_write_session_request(
struct cache_mp_write_session_request *);
extern struct cache_mp_write_session_request *
get_cache_mp_write_session_request(
struct comm_element *);
extern void init_cache_mp_write_session_response(
struct cache_mp_write_session_response *);
extern void finalize_cache_mp_write_session_response(
struct cache_mp_write_session_response *);
extern struct cache_mp_write_session_response *
get_cache_mp_write_session_response(struct comm_element *);
extern void init_cache_mp_write_session_write_request(
struct cache_mp_write_session_write_request *);
extern void finalize_cache_mp_write_session_write_request(
struct cache_mp_write_session_write_request *);
extern struct cache_mp_write_session_write_request *
get_cache_mp_write_session_write_request(struct comm_element *);
extern void init_cache_mp_write_session_write_response(
struct cache_mp_write_session_write_response *);
extern void finalize_cache_mp_write_session_write_response(
struct cache_mp_write_session_write_response *);
extern struct cache_mp_write_session_write_response *
get_cache_mp_write_session_write_response(struct comm_element *);
extern void init_cache_mp_read_session_request(
struct cache_mp_read_session_request *);
extern void finalize_cache_mp_read_session_request(
struct cache_mp_read_session_request *);
extern struct cache_mp_read_session_request *get_cache_mp_read_session_request(
struct comm_element *);
extern void init_cache_mp_read_session_response(
struct cache_mp_read_session_response *);
extern void finalize_cache_mp_read_session_response(
struct cache_mp_read_session_response *);
extern struct cache_mp_read_session_response *
get_cache_mp_read_session_response(
struct comm_element *);
extern void init_cache_mp_read_session_read_response(
struct cache_mp_read_session_read_response *);
extern void finalize_cache_mp_read_session_read_response(
struct cache_mp_read_session_read_response *);
extern struct cache_mp_read_session_read_response *
get_cache_mp_read_session_read_response(struct comm_element *);
#endif

File diff suppressed because it is too large

View File

@ -1,110 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_QUERY_H__
#define __CACHED_QUERY_H__
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include "cachelib.h"
#include "config.h"
#include "protocol.h"
struct query_state;
struct configuration;
struct configuration_entry;
typedef int (*query_process_func)(struct query_state *);
typedef void (*query_destroy_func)(struct query_state *);
typedef ssize_t (*query_read_func)(struct query_state *, void *, size_t);
typedef ssize_t (*query_write_func)(struct query_state *, const void *,
size_t);
/*
 * The query state structure contains the information needed to process all
 * types of requests and to send all types of responses.
*/
struct query_state {
struct timeval creation_time;
struct timeval timeout;
struct comm_element request;
struct comm_element response;
struct configuration_entry *config_entry;
void *mdata;
query_process_func process_func; /* called on each event */
query_destroy_func destroy_func; /* called on destroy */
/*
 * By substituting these functions we can transparently send and receive
 * very large buffers.
*/
query_write_func write_func; /* data write function */
query_read_func read_func; /* data read function */
char *eid_str; /* the user-identifying string (euid_egid_) */
size_t eid_str_length;
uid_t euid; /* euid of the caller, received via getpeereid */
uid_t uid; /* uid of the caller, received via credentials */
gid_t egid; /* egid of the caller, received via getpeereid */
gid_t gid; /* gid of the caller, received via credentials */
size_t io_buffer_size;
size_t io_buffer_watermark;
size_t kevent_watermark; /* bytes to be sent/received */
int sockfd; /* the unix socket to read/write */
int kevent_filter; /* EVFILT_READ or EVFILT_WRITE */
int socket_failed; /* set to 1 if the socket doesn't work correctly */
/*
 * These fields are used internally to carry out the sending and receiving
 * of large buffers.
*/
char *io_buffer;
char *io_buffer_p;
int io_buffer_filter;
int use_alternate_io;
};
extern int check_query_eids(struct query_state *);
extern ssize_t query_io_buffer_read(struct query_state *, void *, size_t);
extern ssize_t query_io_buffer_write(struct query_state *, const void *,
size_t);
extern ssize_t query_socket_read(struct query_state *, void *, size_t);
extern ssize_t query_socket_write(struct query_state *, const void *,
size_t);
extern struct query_state *init_query_state(int, size_t, uid_t, gid_t);
extern void destroy_query_state(struct query_state *);
#endif
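The fields above are driven from the daemon's kqueue loop in query.c (whose diff is suppressed above). The sketch below shows the general driving pattern under stated assumptions: the function names are hypothetical, error handling is omitted, and the real loop also enforces the per-query timeout.

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

#include "query.h"

/*
 * Hypothetical sketch: (re)register the query's socket for the filter and
 * low watermark it asked for, then let process_func run when the event fires.
 */
static void
requeue_query(int kq, struct query_state *qstate)
{
	struct kevent ev;

	EV_SET(&ev, qstate->sockfd, qstate->kevent_filter,
	    EV_ADD | EV_ONESHOT, NOTE_LOWAT, qstate->kevent_watermark,
	    qstate);
	kevent(kq, &ev, 1, NULL, 0, NULL);
}

static void
event_loop_sketch(int kq)
{
	struct kevent ev;
	struct query_state *qstate;

	for (;;) {
		if (kevent(kq, NULL, 0, &ev, 1, NULL) < 1)
			continue;
		qstate = (struct query_state *)ev.udata;
		if (qstate->process_func(qstate) != 0 ||
		    qstate->process_func == NULL) {
			/* the handler failed or signalled completion */
			destroy_query_state(qstate);
			continue;
		}
		requeue_query(kq, qstate);	/* wait for the next chunk */
	}
}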

View File

@ -1,36 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include "singletons.h"
struct configuration *s_configuration = NULL;
cache s_cache = INVALID_CACHE;
struct runtime_env *s_runtime_env = NULL;
struct agent_table *s_agent_table = NULL;

View File

@ -1,47 +0,0 @@
/*-
* Copyright (c) 2005 Michael Bushkov <bushman@rsu.ru>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef __CACHED_SINGLETONS_H__
#define __CACHED_SINGLETONS_H__
#include "cachelib.h"
#include "config.h"
#include "agent.h"
struct runtime_env {
int queue;
int sockfd;
int finished; /* for future use */
};
extern struct configuration *s_configuration;
extern cache s_cache;
extern struct runtime_env *s_runtime_env;
extern struct agent_table *s_agent_table;
#endif