freebsd/contrib/openbsm/libbsm/bsm_mask.c
Robert Watson 3b97a967e1 Vendor branch import of TrustedBSD OpenBSM 1.0 alpha 5:
- Update install notes to indicate /etc files are to be installed manually.
- On systems without LOG_SECURITY, use LOG_AUTH.
- Convert to autoconf/automake in order to move to a more portable (not
  BSD-specific) build infrastructure, and more easy conditional building of
  components.  Currently, the primary feature loss is that automake does
  not have native support for manual symlinks.  This will be addressed in a
  future OpenBSM release.
- Add compat/queue.h, to be used on systems with dated BSD queue macro
  libraries (as found on Linux).
- Rename CHANGELOG to HISTORY, as our change log doesn't follow some of the
  existing conventions for a CHANGELOG.
- Some private data structures moved from audit.h to audit_internal.h to
  prevent inappropriate use by applications and name space pollution.
- Improved detection and use of endian macros using autoconf.
- Avoid non-portable use of struct in6_addr, which is largely opaque.
- Avoid leaking BSD kernel socket related token code to user space in
  bsm_token.c.
- Teach System V IPC calls to look for Linux naming variations for certain
  struct ipc_perm fields.
- Test for audit system calls, and if not present, don't build
  bsm_wrappers.c, bsm_notify.c, audit(8), and auditd(8), which rely on
  those system calls.
- au_close() is not implemented on systems that don't have audit system
  calls, but au_close_buffer() is.
- Work around missing BSDisms in bsm_wrappers.c.
- Fix nested includes so including libbsm.h in an application on Linux
  picks up the necessary definitions.

Obtained from:	TrustedBSD Project
2006-03-04 16:45:52 +00:00

/*
 * Copyright (c) 2004 Apple Computer, Inc.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 * $P4: //depot/projects/trustedbsd/openbsm/libbsm/bsm_mask.c#13 $
 */

#include <sys/types.h>
#include <config/config.h>
#ifdef HAVE_FULL_QUEUE_H
#include <sys/queue.h>
#else /* !HAVE_FULL_QUEUE_H */
#include <compat/queue.h>
#endif /* !HAVE_FULL_QUEUE_H */
#include <bsm/libbsm.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

/* MT-Safe */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static int firsttime = 1;

/*
 * XXX ev_cache, once created, sticks around until the calling program exits.
 * This may or may not be a problem as far as absolute memory usage goes, but
 * at least there don't appear to be any leaks in using the cache.
 *
 * XXXRW: Note that despite (mutex), load_event_table() could race with
 * other consumers of the getauevents() API.
 */
struct audit_event_map {
        char ev_name[AU_EVENT_NAME_MAX];
        char ev_desc[AU_EVENT_DESC_MAX];
        struct au_event_ent ev;
        LIST_ENTRY(audit_event_map) ev_list;
};
static LIST_HEAD(, audit_event_map) ev_cache;

static struct audit_event_map *
audit_event_map_alloc(void)
{
        struct audit_event_map *aemp;

        aemp = malloc(sizeof(*aemp));
        if (aemp == NULL)
                return (aemp);
        bzero(aemp, sizeof(*aemp));
        aemp->ev.ae_name = aemp->ev_name;
        aemp->ev.ae_desc = aemp->ev_desc;
        return (aemp);
}

static void
audit_event_map_free(struct audit_event_map *aemp)
{
        free(aemp);
}

/*
 * When reading into the cache fails, we need to flush the entire cache to
 * prevent it from containing some but not all records.
 */
static void
flush_cache(void)
{
        struct audit_event_map *aemp;

        /* XXX: Would assert 'mutex'. */
        while ((aemp = LIST_FIRST(&ev_cache)) != NULL) {
                LIST_REMOVE(aemp, ev_list);
                audit_event_map_free(aemp);
        }
}

static int
load_event_table(void)
{
        struct audit_event_map *aemp;
        struct au_event_ent *ep;

        /*
         * XXX: Would assert 'mutex'.
         * Loading of the cache happens only once; don't check if cache is
         * already loaded.
         */
        LIST_INIT(&ev_cache);
        setauevent();   /* Rewind to beginning of entries. */
        do {
                aemp = audit_event_map_alloc();
                if (aemp == NULL) {
                        flush_cache();
                        return (-1);
                }
                ep = getauevent_r(&aemp->ev);
                if (ep != NULL)
                        LIST_INSERT_HEAD(&ev_cache, aemp, ev_list);
                else
                        audit_event_map_free(aemp);
        } while (ep != NULL);
        return (1);
}

/*
 * Read the event with the matching event number from the cache.
 */
static struct au_event_ent *
read_from_cache(au_event_t event)
{
        struct audit_event_map *elem;

        /* XXX: Would assert 'mutex'. */
        LIST_FOREACH(elem, &ev_cache, ev_list) {
                if (elem->ev.ae_number == event)
                        return (&elem->ev);
        }
        return (NULL);
}

/*
 * Check if the audit event is preselected against the preselection mask.
 */
int
au_preselect(au_event_t event, au_mask_t *mask_p, int sorf, int flag)
{
        struct au_event_ent *ev;
        au_class_t effmask = 0;

        if (mask_p == NULL)
                return (-1);

        pthread_mutex_lock(&mutex);
        if (firsttime) {
                firsttime = 0;
                if (load_event_table() == -1) {
                        pthread_mutex_unlock(&mutex);
                        return (-1);
                }
        }
        switch (flag) {
        case AU_PRS_REREAD:
                flush_cache();
                if (load_event_table() == -1) {
                        pthread_mutex_unlock(&mutex);
                        return (-1);
                }
                ev = read_from_cache(event);
                break;
        case AU_PRS_USECACHE:
                ev = read_from_cache(event);
                break;
        default:
                ev = NULL;
        }
        if (ev == NULL) {
                pthread_mutex_unlock(&mutex);
                return (-1);
        }
        if (sorf & AU_PRS_SUCCESS)
                effmask |= (mask_p->am_success & ev->ae_class);
        if (sorf & AU_PRS_FAILURE)
                effmask |= (mask_p->am_failure & ev->ae_class);
        pthread_mutex_unlock(&mutex);
        if (effmask != 0)
                return (1);
        return (0);
}
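
For illustration, here is a minimal standalone sketch of how a consumer might
call au_preselect().  It is not part of the original file: the "fr" flag
string, the AUE_OPEN_R event number, and the choice of AU_PRS_USECACHE are
arbitrary picks for the example, and a real caller would normally obtain its
preselection mask from getaudit(2) or au_user_mask(3) rather than from a
hard-coded flag string.  Note also that the lookup consults the system's
audit event database at run time, so it can fail on hosts without one.

#include <bsm/libbsm.h>
#include <bsm/audit_kevents.h>

#include <stdio.h>

int
main(void)
{
        char flags[] = "fr";    /* The "file read" audit class, as an example. */
        au_mask_t mask;

        /* Convert the human-readable flag string into a preselection mask. */
        if (getauditflagsbin(flags, &mask) != 0)
                return (1);

        /*
         * au_preselect() returns 1 if the event is selected by the mask for
         * the requested success/failure states, 0 if it is not, and -1 on
         * error (for example, an unknown event number).
         */
        switch (au_preselect(AUE_OPEN_R, &mask,
            AU_PRS_SUCCESS | AU_PRS_FAILURE, AU_PRS_USECACHE)) {
        case 1:
                printf("AUE_OPEN_R would be audited\n");
                break;
        case 0:
                printf("AUE_OPEN_R would not be audited\n");
                break;
        default:
                printf("event lookup failed\n");
        }
        return (0);
}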