Actually remove obsolete patches

Actually remove the obsolete patch files that were mentioned in the commit message of 24f60eb964 but missed there.
Edgar Fuß 2020-08-17 22:26:24 +02:00
parent 698835bff6
commit fc0e176e0b
6 changed files with 0 additions and 248 deletions

distinfo

@@ -9,19 +9,14 @@ SHA1 (patch-configure.ac) = 77874524ded19078356a303b984c19bc85a86647
SHA1 (patch-src_bind.c) = ee9f6c5e7828010b4beecff4b2d3d8ff2aebc7ac
SHA1 (patch-src_collectd.conf.in) = ee885263d5dca87857c8d56069952db0d4ffef37
SHA1 (patch-src_cpu.c) = 099f04b0a156ae78f5f5809d6db639534d4556a1
SHA1 (patch-src_daemon_collectd.c) = 155da2fde889e961719fe6be47aa7fc25199fbe7
SHA1 (patch-src_daemon_common.h) = d1ef6a5257156a864bf896629870f0f83a858cff
SHA1 (patch-src_df.c) = de699d221b003b70c22308cf43f4d61daa4071f7
SHA1 (patch-src_disk.c) = a9e71366f75fcc760384d41651771738ec28823f
SHA1 (patch-src_entropy.c) = 77ddaf1df7ab898d3009f94bfcca82d16e22f9fe
SHA1 (patch-src_irq.c) = 51f0e61a58b439f5e4e8de873e887700fc3dd67e
SHA1 (patch-src_libcollectclient_network__buffer.c) = 541332bcffcbcbb2e398275f93e2aa1ae8861598
SHA1 (patch-src_lua.c) = 656934889976f489abb80a8244492f845a3bd9f8
SHA1 (patch-src_memory.c) = ddfa79d68ebbaeed3bf1f35acbadda111d0387af
SHA1 (patch-src_netstat__udp.c) = 4763c9c26a5639a84d1ed306b40c5b761110f632
SHA1 (patch-src_nfs.c) = a38e2b2096b0b7c1d52bba18dcbb78f3cdc264a9
SHA1 (patch-src_processes.c) = c1e4a5e45c2f489067b83164381b569f270b1a4e
SHA1 (patch-src_read_kafka.c) = 05334bd0ea51ab386afce0cb3336378fcc42d997
SHA1 (patch-src_statsd.c) = 35f4349d2d2c9bddc0f4770344f969157cd012f6
SHA1 (patch-src_swap.c) = 4ef887e0eddf5c21c121c951f64262781f71e984
SHA1 (patch-src_tcpconns.c) = dc6c1d361561f16623933b53ec127d88052236b2

patch-src_daemon_collectd.c

@@ -1,14 +0,0 @@
$NetBSD: patch-src_daemon_collectd.c,v 1.1 2017/11/21 15:18:23 fhajny Exp $
Fix missing declaration.
--- src/daemon/collectd.c.orig 2017-11-18 09:03:27.350750556 +0000
+++ src/daemon/collectd.c
@@ -208,6 +208,7 @@ static int change_basedir(const char *or
} /* static int change_basedir (char *dir) */
#if HAVE_LIBKSTAT
+extern kstat_ctl_t *kc;
static void update_kstat(void) {
if (kc == NULL) {
if ((kc = kstat_open()) == NULL)

patch-src_daemon_common.h

@@ -1,17 +0,0 @@
$NetBSD: patch-src_daemon_common.h,v 1.1 2017/11/21 15:18:23 fhajny Exp $
Need kstat.h if available.
--- src/daemon/common.h.orig 2017-11-18 09:03:27.350750556 +0000
+++ src/daemon/common.h
@@ -32,6 +32,10 @@
#include "plugin.h"
+#if HAVE_KSTAT_H
+#include <kstat.h>
+#endif
+
#if HAVE_PWD_H
#include <pwd.h>
#endif
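
For context, the two kstat patches above work together: the daemon holds one global kstat control handle (kc), so other files need the kstat_ctl_t type (hence the conditional <kstat.h> include) plus an extern declaration to reference the shared handle. What follows is a minimal, self-contained sketch of that libkstat pattern, not collectd code; it assumes a Solaris/illumos-style libkstat and a build along the lines of cc -DHAVE_KSTAT_H=1 -DHAVE_LIBKSTAT=1 kstat_sketch.c -lkstat.

#include <stdio.h>

#if HAVE_KSTAT_H /* mirror the patch: include kstat.h only when configure found it */
#include <kstat.h>
#endif

#if HAVE_LIBKSTAT
/* collectd defines the handle once in the daemon; other files, such as the
 * patched collectd.c, only carry "extern kstat_ctl_t *kc;".  In a single-file
 * sketch the definition lives here. */
kstat_ctl_t *kc;

static int update_kstat(void) {
  if (kc == NULL) {
    if ((kc = kstat_open()) == NULL) {
      perror("kstat_open");
      return -1;
    }
  } else if (kstat_chain_update(kc) < 0) {
    perror("kstat_chain_update");
    return -1;
  }
  return 0;
}

int main(void) {
  if (update_kstat() != 0)
    return 1;
  /* Walk the kstat chain once and print module:name pairs. */
  for (kstat_t *ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next)
    printf("%s:%s\n", ksp->ks_module, ksp->ks_name);
  kstat_close(kc);
  return 0;
}
#else
int main(void) { return 0; }
#endif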

patch-src_disk.c

@@ -1,134 +0,0 @@
$NetBSD: patch-src_disk.c,v $
Provide a working port to NetBSD.
--- src/disk.c.orig 2018-08-30 15:04:48.000000000 +0200
+++ src/disk.c 2018-08-30 15:04:49.000000000 +0200
@@ -135,6 +135,16 @@ static int numdisk;
static int pnumdisk;
/* #endif HAVE_PERFSTAT */
+#elif HAVE_SYSCTL && KERNEL_NETBSD
+
+#include <sys/sysctl.h>
+#include <sys/iostat.h>
+
+static struct io_sysctl *drives = NULL;
+static size_t ndrive = 0;
+
+/* #endif HAVE_SYSCTL && KERNEL_NETBSD */
+
#else
#error "No applicable input method."
#endif
@@ -253,7 +263,31 @@ static int disk_init(void) {
continue;
ksp[numdisk++] = ksp_chain;
}
-#endif /* HAVE_LIBKSTAT */
+/* #endif HAVE_LIBKSTAT */
+#elif HAVE_SYSCTL && KERNEL_NETBSD
+ int mib[3];
+ size_t size;
+
+ /* figure out number of drives */
+ mib[0] = CTL_HW;
+ mib[1] = HW_IOSTATS;
+ mib[2] = sizeof(struct io_sysctl);
+ if (sysctl(mib, 3, NULL, &size, NULL, 0) == -1) {
+ ERROR ("disk plugin: sysctl for ndrives failed");
+ return -1;
+ }
+ ndrive = size / sizeof(struct io_sysctl);
+
+ if (size == 0 ) {
+ ERROR ("disk plugin: no drives found");
+ return -1;
+ }
+ drives = (struct io_sysctl *)malloc(size);
+ if (drives == NULL) {
+ ERROR ("disk plugin: memory allocation failure");
+ return -1;
+ }
+#endif /* HAVE_SYSCTL && KERNEL_NETBSD */
return 0;
} /* int disk_init */
@@ -284,7 +318,7 @@ static void disk_submit(const char *plug
plugin_dispatch_values(&vl);
} /* void disk_submit */
-#if KERNEL_FREEBSD || KERNEL_LINUX
+#if KERNEL_FREEBSD || (HAVE_SYSCTL && KERNEL_NETBSD) || KERNEL_LINUX
static void submit_io_time(char const *plugin_instance, derive_t io_time,
derive_t weighted_time) {
value_list_t vl = VALUE_LIST_INIT;
@@ -300,7 +334,7 @@ static void submit_io_time(char const *p
plugin_dispatch_values(&vl);
} /* void submit_io_time */
-#endif /* KERNEL_FREEBSD || KERNEL_LINUX */
+#endif /* KERNEL_FREEBSD || (HAVE_SYSCTL && KERNEL_NETBSD) || KERNEL_LINUX */
#if KERNEL_LINUX
static void submit_in_progress(char const *disk_name, gauge_t in_progress) {
@@ -1017,7 +1051,58 @@ static int disk_read(void) {
1000000.0;
disk_submit(stat_disk[i].name, "disk_time", read_time, write_time);
}
-#endif /* defined(HAVE_PERFSTAT) */
+/* #endif defined(HAVE_PERFSTAT) */
+#elif HAVE_SYSCTL && KERNEL_NETBSD
+ int mib[3];
+ size_t size, i, nndrive;
+
+ /* figure out number of drives */
+ mib[0] = CTL_HW;
+ mib[1] = HW_IOSTATS;
+ mib[2] = sizeof(struct io_sysctl);
+ if (sysctl(mib, 3, NULL, &size, NULL, 0) == -1) {
+ ERROR ("disk plugin: sysctl for ndrives failed");
+ return -1;
+ }
+ nndrive = size / sizeof(struct io_sysctl);
+
+ if (size == 0 ) {
+ ERROR ("disk plugin: no drives found");
+ return -1;
+ }
+ /* number of drives changed, reallocate buffer */
+ if (nndrive != ndrive) {
+ drives = (struct io_sysctl *)realloc(drives, size);
+ if (drives == NULL) {
+ ERROR ("disk plugin: memory allocation failure");
+ return -1;
+ }
+ ndrive = nndrive;
+ }
+
+ /* get stats for all drives */
+ mib[0] = CTL_HW;
+ mib[1] = HW_IOSTATS;
+ mib[2] = sizeof(struct io_sysctl);
+ if (sysctl(mib, 3, drives, &size, NULL, 0) == -1) {
+ ERROR ("disk plugin: sysctl for drive stats failed");
+ return -1;
+ }
+
+ for (i = 0; i < ndrive; i++) {
+ if (drives[i].type != IOSTAT_DISK)
+ continue;
+ if (ignorelist_match(ignorelist, drives[i].name))
+ continue;
+
+ disk_submit(drives[i].name, "disk_octets",
+ drives[i].rbytes, drives[i].wbytes);
+ disk_submit(drives[i].name, "disk_ops",
+ drives[i].rxfer, drives[i].wxfer);
+ submit_io_time(drives[i].name,
+ drives[i].time_sec * 1000 + drives[i].time_usec / 1000, 0);
+ }
+#endif /* HAVE_SYSCTL && KERNEL_NETBSD */
return 0;
} /* int disk_read */
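
The NetBSD branch uses the standard two-step sysctl protocol: call sysctl(2) once with a NULL buffer to learn the required size, allocate, then call again to fetch one struct io_sysctl record per device. A minimal standalone sketch of that pattern follows; it is illustrative only (not collectd code) and relies on the same <sys/iostat.h> fields the patch uses.

#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/iostat.h>

#include <stdio.h>
#include <stdlib.h>

int main(void) {
  int mib[3] = {CTL_HW, HW_IOSTATS, sizeof(struct io_sysctl)};
  size_t size = 0;

  /* First call: NULL buffer, sysctl only reports the needed size. */
  if (sysctl(mib, 3, NULL, &size, NULL, 0) == -1 || size == 0)
    return 1;

  struct io_sysctl *drives = malloc(size);
  if (drives == NULL)
    return 1;

  /* Second call: fill the buffer with one record per device. */
  if (sysctl(mib, 3, drives, &size, NULL, 0) == -1) {
    free(drives);
    return 1;
  }

  for (size_t i = 0; i < size / sizeof(struct io_sysctl); i++) {
    if (drives[i].type != IOSTAT_DISK) /* only disks, as in the plugin */
      continue;
    printf("%s: %llu bytes read, %llu bytes written\n", drives[i].name,
           (unsigned long long)drives[i].rbytes,
           (unsigned long long)drives[i].wbytes);
  }
  free(drives);
  return 0;
}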

patch-src_lua.c

@@ -1,47 +0,0 @@
$NetBSD: patch-src_lua.c,v 1.2 2017/11/21 15:18:23 fhajny Exp $
Make it possible to register more than one reader/writer.
Upstream request: https://github.com/collectd/collectd/pull/2379
--- src/lua.c.orig 2017-11-18 09:03:27.358750191 +0000
+++ src/lua.c
@@ -281,9 +281,6 @@ static int lua_cb_register_read(lua_Stat
luaL_checktype(L, 1, LUA_TFUNCTION);
- char function_name[DATA_MAX_NAME_LEN];
- snprintf(function_name, sizeof(function_name), "lua/%s", lua_tostring(L, 1));
-
int callback_id = clua_store_callback(L, 1);
if (callback_id < 0)
return luaL_error(L, "%s", "Storing callback function failed");
@@ -298,6 +295,9 @@ static int lua_cb_register_read(lua_Stat
if (cb == NULL)
return luaL_error(L, "%s", "calloc failed");
+ char function_name[DATA_MAX_NAME_LEN];
+ snprintf(function_name, sizeof(function_name), "lua/%p", thread);
+
cb->lua_state = thread;
cb->callback_id = callback_id;
cb->lua_function_name = strdup(function_name);
@@ -325,9 +325,6 @@ static int lua_cb_register_write(lua_Sta
luaL_checktype(L, 1, LUA_TFUNCTION);
- char function_name[DATA_MAX_NAME_LEN] = "";
- snprintf(function_name, sizeof(function_name), "lua/%s", lua_tostring(L, 1));
-
int callback_id = clua_store_callback(L, 1);
if (callback_id < 0)
return luaL_error(L, "%s", "Storing callback function failed");
@@ -342,6 +339,9 @@ static int lua_cb_register_write(lua_Sta
if (cb == NULL)
return luaL_error(L, "%s", "calloc failed");
+ char function_name[DATA_MAX_NAME_LEN];
+ snprintf(function_name, sizeof(function_name), "lua/%p", thread);
+
cb->lua_state = thread;
cb->callback_id = callback_id;
cb->lua_function_name = strdup(function_name);
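
Why registering a second callback used to fail (a hedged reading of the diff, not stated in the patch header): lua_tostring() returns NULL when the value at stack index 1 is a function rather than a string, so every callback ended up with the same placeholder name and the duplicate was rejected at registration time. Keying the name on the per-callback coroutine pointer makes it unique. A tiny illustrative sketch of just the naming scheme (the DATA_MAX_NAME_LEN value is an assumption, not taken from collectd headers):

#include <stdio.h>

#define DATA_MAX_NAME_LEN 128 /* assumed; collectd defines its own limit */

int main(void) {
  /* Stand-ins for the per-callback Lua coroutines the plugin creates. */
  int thread_a, thread_b;
  char name_a[DATA_MAX_NAME_LEN], name_b[DATA_MAX_NAME_LEN];

  /* The patched code derives the name from the coroutine pointer, so two
   * registrations always get distinct names, e.g. "lua/0x7f7fff2a1b30". */
  snprintf(name_a, sizeof(name_a), "lua/%p", (void *)&thread_a);
  snprintf(name_b, sizeof(name_b), "lua/%p", (void *)&thread_b);
  printf("%s\n%s\n", name_a, name_b);
  return 0;
}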

patch-src_read_kafka.c

@@ -1,31 +0,0 @@
$NetBSD: patch-src_read_kafka.c $
Adapt to newer librdkafka.
From collectd commit 7536b41b8dee524307d3f9d4bf8773b7ec8314f0.
--- src/write_kafka.c.orig
+++ src/write_kafka.c
@@ -77,6 +77,14 @@ static void kafka_log(const rd_kafka_t *rkt, int level, const char *fac,
}
#endif
+static rd_kafka_resp_err_t kafka_error() {
+#if RD_KAFKA_VERSION >= 0x000b00ff
+ return rd_kafka_last_error();
+#else
+ return rd_kafka_errno2err(errno);
+#endif
+}
+
static uint32_t kafka_hash(const char *keydata, size_t keylen) {
uint32_t hash = 5381;
for (; keylen > 0; keylen--)
@@ -147,7 +155,7 @@ static int kafka_handle(struct kafka_topic_context *ctx) /* {{{ */
if ((ctx->topic = rd_kafka_topic_new(ctx->kafka, ctx->topic_name,
topic_conf)) == NULL) {
ERROR("write_kafka plugin: cannot create topic : %s\n",
- rd_kafka_err2str(rd_kafka_errno2err(errno)));
+ rd_kafka_err2str(kafka_error()));
return errno;
}
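
The guard compares against librdkafka's compile-time version macro: RD_KAFKA_VERSION is hex-encoded as major, minor, revision and a pre-release byte (0xff marking a final release), so 0x000b00ff is release 0.11.0, where rd_kafka_last_error() is available. A small sketch (illustrative, not part of the patch; link with -lrdkafka) for checking which library version a build actually picks up:

#include <stdio.h>
#include <librdkafka/rdkafka.h>

int main(void) {
  /* Compile-time version the #if in kafka_error() keys on. */
  printf("compiled against librdkafka 0x%08x\n", (unsigned)RD_KAFKA_VERSION);
  /* Runtime version of the library actually linked in. */
  printf("running against librdkafka %s (0x%08x)\n", rd_kafka_version_str(),
         (unsigned)rd_kafka_version());
  return 0;
}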