270 files changed, 6285 insertions, 2955 deletions
diff --git a/.circleci/Dockerfile b/.circleci/Dockerfile
index c488a7926..274edb8e4 100644
--- a/.circleci/Dockerfile
+++ b/.circleci/Dockerfile
@@ -6,6 +6,7 @@ FROM debian:9
 # v1.0.1 - Run as non-root, add unzip, xz-utils
 # v1.0.2 - Add bzr
 # v1.0.3 - Verify usign signatures
+# v1.0.4 - Add support for Python3
 
 RUN apt update && apt install -y \
 build-essential \
@@ -18,6 +19,7 @@ git \
 libncurses5-dev \
 libssl-dev \
 python \
+python3 \
 signify-openbsd \
 subversion \
 time \
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 12b64804f..111440797 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -2,7 +2,7 @@ version: 2.0
 jobs:
   build:
     docker:
-      - image: docker.io/openwrtorg/packages-cci:v1.0.3
+      - image: docker.io/openwrtorg/packages-cci:v1.0.4
     environment:
       - SDK_HOST: "downloads.openwrt.org"
       - SDK_PATH: "snapshots/targets/ath79/generic"
@@ -94,6 +94,11 @@ jobs:
          working_directory: ~/build_dir
          command: |
            tar Jxf ~/sdk/$SDK_FILE --strip=1
+           touch .config
+           make prepare-tmpinfo scripts/config/conf
+           ./scripts/config/conf --defconfig=.config Config.in
+           make prereq
+           rm .config
            cat > feeds.conf <<EOF
            src-git base https://github.com/openwrt/openwrt.git;$BRANCH
            src-link packages $HOME/openwrt_packages
@@ -145,7 +150,11 @@ jobs:
            for PKG in $PKGS ; do
              echo_blue "===+ Building: $PKG"
-             make "package/$PKG/compile" -j3 V=s
+             make "package/$PKG/compile" -j3 V=s || {
+               RET=$?
+               echo_red "===+ Building: $PKG failed, rebuilding with -j1 for human readable error log"
+               make "package/$PKG/compile" -j1 V=s; exit $RET
+             }
            done
 
       - store_artifacts:
diff --git a/.github/issue_template b/.github/issue_template
index 73602ae64..232baad19 100644
--- a/.github/issue_template
+++ b/.github/issue_template
@@ -1,9 +1,8 @@
 Please make sure that the issue subject starts with `<package-name>: `
 
-This repo here is only for packages maintained in this repo. For base packages residing in the same repo as the build system and maintained by core devs, please consider opening tickets there for more timely responses
+Also make sure that the package is maintained in this repository and not in base which should be submitted at https://bugs.openwrt.org or in the LuCI repository which should be submitted at https://github.com/openwrt/luci/issues.
 
- - OpenWrt base system: https://bugs.openwrt.org
- - Most LuCI packages: https://github.com/openwrt/luci/issues
+Issues related to releases below 18.06 and forks are not supported or maintained and will be closed.
 
 # Issue template (remove lines from top till here)
diff --git a/admin/netdata/Makefile b/admin/netdata/Makefile
index c0471362d..7b6a4d1da 100644
--- a/admin/netdata/Makefile
+++ b/admin/netdata/Makefile
@@ -8,18 +8,17 @@ include $(TOPDIR)/rules.mk
 
 PKG_NAME:=netdata
-PKG_VERSION:=1.14.0
-PKG_RELEASE:=2
+PKG_VERSION:=1.16.1
+PKG_RELEASE:=1
 
 PKG_MAINTAINER:=Josef Schlehofer <pepe.schlehofer@gmail.com>, Daniel Engberg <daniel.engberg.lists@pyret.net>
-PKG_LICENSE:=GPL-3.0+
+PKG_LICENSE:=GPL-3.0-or-later
 PKG_LICENSE_FILES:=COPYING
 PKG_CPE_ID:=cpe:/a:my-netdata:netdata
 
 PKG_SOURCE:=$(PKG_NAME)-v$(PKG_VERSION).tar.gz
-PKG_SOURCE_URL:=https://github.com/netdata/netdata/releases/download/v$(PKG_VERSION)
-PKG_HASH:=f3768f6927e3712dce73794c6943a12f4454410c872eb3dfd19af4f52296187a
-PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-v$(PKG_VERSION)
+PKG_SOURCE_URL:=https://codeload.github.com/netdata/netdata/tar.gz/v$(PKG_VERSION)?
+PKG_HASH:=94492108a6e24e8b39c011ae35ff6f50a848d816af396fdf2b44655cecd78672 PKG_INSTALL:=1 PKG_FIXUP:=autoreconf @@ -30,9 +29,9 @@ include $(INCLUDE_DIR)/package.mk define Package/netdata SECTION:=admin CATEGORY:=Administration - DEPENDS:=+zlib +libuuid +libmnl + DEPENDS:=+zlib +libuuid +libmnl +libjson-c TITLE:=Real-time performance monitoring tool - URL:=https://my-netdata.io/ + URL:=https://www.netdata.cloud/ endef define Package/netdata/description @@ -53,7 +52,14 @@ CONFIGURE_ARGS += \ --disable-x86-sse \ --enable-lto \ --without-libcap \ - --disable-plugin-nfacct + --disable-https \ + --disable-dbengine \ + --disable-plugin-nfacct \ + --disable-plugin-freeipmi \ + --disable-plugin-cups \ + --disable-plugin-xenstat \ + --disable-backend-prometheus-remote-write \ + --enable-jsonc define Package/netdata/conffiles /etc/netdata/ @@ -63,6 +69,7 @@ define Package/netdata/install $(INSTALL_DIR) $(1)/etc/netdata/custom-plugins.d $(CP) $(PKG_INSTALL_DIR)/etc/netdata $(1)/etc $(CP) ./files/netdata.conf $(1)/etc/netdata + touch $(1)/etc/netdata/.opt-out-from-anonymous-statistics $(INSTALL_DIR) $(1)/usr/lib $(CP) $(PKG_INSTALL_DIR)/usr/lib/netdata $(1)/usr/lib $(CP) $(1)/usr/lib/netdata/conf.d/fping.conf $(1)/etc diff --git a/admin/netdata/patches/002-force-python3.patch b/admin/netdata/patches/002-force-python3.patch index 84d19259d..ef2f24b06 100644 --- a/admin/netdata/patches/002-force-python3.patch +++ b/admin/netdata/patches/002-force-python3.patch @@ -9,6 +9,6 @@ -exec "$(command -v python || command -v python3 || command -v python2 || -echo "ERROR python IS NOT AVAILABLE IN THIS SYSTEM")" "$0" "$@" # ''' +#!/usr/bin/python3 - + # -*- coding: utf-8 -*- # Description: diff --git a/devel/gcc/Makefile b/devel/gcc/Makefile index a63c855c4..e2eef4889 100644 --- a/devel/gcc/Makefile +++ b/devel/gcc/Makefile @@ -24,7 +24,7 @@ endef PKG_NAME:=gcc # PKG_VERSION=7.3.0 PKG_VERSION=7.4.0 -PKG_RELEASE:=4 +PKG_RELEASE:=5 PKG_SOURCE_URL:=@GNU/gcc/gcc-$(PKG_VERSION) PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_INSTALL:=1 @@ -147,6 +147,7 @@ define Build/Configure --disable-libvtv \ --disable-libcilkrts \ --disable-libmudflap \ + --disable-libmpx \ --disable-multilib \ --disable-libgomp \ --disable-libquadmath \ diff --git a/devel/patch/Makefile b/devel/patch/Makefile index 43049def1..8f1472c76 100644 --- a/devel/patch/Makefile +++ b/devel/patch/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=patch PKG_VERSION:=2.7.6 -PKG_RELEASE:=3 +PKG_RELEASE:=5 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=@GNU/patch diff --git a/devel/patch/patches/050-CVE-2019-13636.patch b/devel/patch/patches/050-CVE-2019-13636.patch new file mode 100644 index 000000000..e62c3d417 --- /dev/null +++ b/devel/patch/patches/050-CVE-2019-13636.patch @@ -0,0 +1,108 @@ +From dce4683cbbe107a95f1f0d45fabc304acfb5d71a Mon Sep 17 00:00:00 2001 +From: Andreas Gruenbacher <agruen@gnu.org> +Date: Mon, 15 Jul 2019 16:21:48 +0200 +Subject: Don't follow symlinks unless --follow-symlinks is given + +* src/inp.c (plan_a, plan_b), src/util.c (copy_to_fd, copy_file, +append_to_file): Unless the --follow-symlinks option is given, open files with +the O_NOFOLLOW flag to avoid following symlinks. So far, we were only doing +that consistently for input files. +* src/util.c (create_backup): When creating empty backup files, (re)create them +with O_CREAT | O_EXCL to avoid following symlinks in that case as well. 
+--- + src/inp.c | 12 ++++++++++-- + src/util.c | 14 +++++++++++--- + 2 files changed, 21 insertions(+), 5 deletions(-) + +diff --git a/src/inp.c b/src/inp.c +index 32d0919..22d7473 100644 +--- a/src/inp.c ++++ b/src/inp.c +@@ -238,8 +238,13 @@ plan_a (char const *filename) + { + if (S_ISREG (instat.st_mode)) + { +- int ifd = safe_open (filename, O_RDONLY|binary_transput, 0); ++ int flags = O_RDONLY | binary_transput; + size_t buffered = 0, n; ++ int ifd; ++ ++ if (! follow_symlinks) ++ flags |= O_NOFOLLOW; ++ ifd = safe_open (filename, flags, 0); + if (ifd < 0) + pfatal ("can't open file %s", quotearg (filename)); + +@@ -340,6 +345,7 @@ plan_a (char const *filename) + static void + plan_b (char const *filename) + { ++ int flags = O_RDONLY | binary_transput; + int ifd; + FILE *ifp; + int c; +@@ -353,7 +359,9 @@ plan_b (char const *filename) + + if (instat.st_size == 0) + filename = NULL_DEVICE; +- if ((ifd = safe_open (filename, O_RDONLY | binary_transput, 0)) < 0 ++ if (! follow_symlinks) ++ flags |= O_NOFOLLOW; ++ if ((ifd = safe_open (filename, flags, 0)) < 0 + || ! (ifp = fdopen (ifd, binary_transput ? "rb" : "r"))) + pfatal ("Can't open file %s", quotearg (filename)); + if (TMPINNAME_needs_removal) +diff --git a/src/util.c b/src/util.c +index 1cc08ba..fb38307 100644 +--- a/src/util.c ++++ b/src/util.c +@@ -388,7 +388,7 @@ create_backup (char const *to, const struct stat *to_st, bool leave_original) + + try_makedirs_errno = ENOENT; + safe_unlink (bakname); +- while ((fd = safe_open (bakname, O_CREAT | O_WRONLY | O_TRUNC, 0666)) < 0) ++ while ((fd = safe_open (bakname, O_CREAT | O_EXCL | O_WRONLY | O_TRUNC, 0666)) < 0) + { + if (errno != try_makedirs_errno) + pfatal ("Can't create file %s", quotearg (bakname)); +@@ -579,10 +579,13 @@ create_file (char const *file, int open_flags, mode_t mode, + static void + copy_to_fd (const char *from, int tofd) + { ++ int from_flags = O_RDONLY | O_BINARY; + int fromfd; + ssize_t i; + +- if ((fromfd = safe_open (from, O_RDONLY | O_BINARY, 0)) < 0) ++ if (! follow_symlinks) ++ from_flags |= O_NOFOLLOW; ++ if ((fromfd = safe_open (from, from_flags, 0)) < 0) + pfatal ("Can't reopen file %s", quotearg (from)); + while ((i = read (fromfd, buf, bufsize)) != 0) + { +@@ -625,6 +628,8 @@ copy_file (char const *from, char const *to, struct stat *tost, + else + { + assert (S_ISREG (mode)); ++ if (! follow_symlinks) ++ to_flags |= O_NOFOLLOW; + tofd = create_file (to, O_WRONLY | O_BINARY | to_flags, mode, + to_dir_known_to_exist); + copy_to_fd (from, tofd); +@@ -640,9 +645,12 @@ copy_file (char const *from, char const *to, struct stat *tost, + void + append_to_file (char const *from, char const *to) + { ++ int to_flags = O_WRONLY | O_APPEND | O_BINARY; + int tofd; + +- if ((tofd = safe_open (to, O_WRONLY | O_BINARY | O_APPEND, 0)) < 0) ++ if (! 
follow_symlinks) ++ to_flags |= O_NOFOLLOW; ++ if ((tofd = safe_open (to, to_flags, 0)) < 0) + pfatal ("Can't reopen file %s", quotearg (to)); + copy_to_fd (from, tofd); + if (close (tofd) != 0) +-- +cgit v1.0-41-gc330 + diff --git a/devel/patch/patches/060-CVE-2019-13638.patch b/devel/patch/patches/060-CVE-2019-13638.patch new file mode 100644 index 000000000..38caff628 --- /dev/null +++ b/devel/patch/patches/060-CVE-2019-13638.patch @@ -0,0 +1,38 @@ +From 3fcd042d26d70856e826a42b5f93dc4854d80bf0 Mon Sep 17 00:00:00 2001 +From: Andreas Gruenbacher <agruen@gnu.org> +Date: Fri, 6 Apr 2018 19:36:15 +0200 +Subject: Invoke ed directly instead of using the shell + +* src/pch.c (do_ed_script): Invoke ed directly instead of using a shell +command to avoid quoting vulnerabilities. +--- + src/pch.c | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/src/pch.c b/src/pch.c +index 4fd5a05..16e001a 100644 +--- a/src/pch.c ++++ b/src/pch.c +@@ -2459,9 +2459,6 @@ do_ed_script (char const *inname, char const *outname, + *outname_needs_removal = true; + copy_file (inname, outname, 0, exclusive, instat.st_mode, true); + } +- sprintf (buf, "%s %s%s", editor_program, +- verbosity == VERBOSE ? "" : "- ", +- outname); + fflush (stdout); + + pid = fork(); +@@ -2470,7 +2467,8 @@ do_ed_script (char const *inname, char const *outname, + else if (pid == 0) + { + dup2 (tmpfd, 0); +- execl ("/bin/sh", "sh", "-c", buf, (char *) 0); ++ assert (outname[0] != '!' && outname[0] != '-'); ++ execlp (editor_program, editor_program, "-", outname, (char *) NULL); + _exit (2); + } + else +-- +cgit v1.0-41-gc330 + diff --git a/lang/lua-openssl/Makefile b/lang/lua-openssl/Makefile index 1fcd57065..aa7341fbd 100644 --- a/lang/lua-openssl/Makefile +++ b/lang/lua-openssl/Makefile @@ -8,16 +8,16 @@ include $(TOPDIR)/rules.mk PKG_NAME:=lua-openssl -PKG_VERSION:=0.7.1 +PKG_VERSION:=0.7.4 PKG_RELEASE:=1 PKG_MAINTAINER:=Amnon Paz <pazamnon@gmail.com> PKG_LICENSE:=MIT PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz -PKG_MIRROR_HASH:=225e3fe09226ff72968a7f36a33e207d27332107456a754abbaa59f99a3038f3 +PKG_MIRROR_HASH:=c27cedee438de95877823b1ae0607556564d82c8692be40f43743ca9cc5a029a PKG_SOURCE_URL:=https://github.com/zhaozg/lua-openssl.git PKG_SOURCE_PROTO:=git -PKG_SOURCE_VERSION:=b104bbe914d279276560f188854036075b99f724 +PKG_SOURCE_VERSION:=96effbaf477ca205f1787aaf1cce643bd208066b PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) include $(INCLUDE_DIR)/package.mk diff --git a/lang/lua-openssl/patches/0001-Revise-Makefile.patch b/lang/lua-openssl/patches/0001-Revise-Makefile.patch index 1a1ef9d03..44f465634 100644 --- a/lang/lua-openssl/patches/0001-Revise-Makefile.patch +++ b/lang/lua-openssl/patches/0001-Revise-Makefile.patch @@ -1,98 +1,148 @@ +diff --git a/Makefile b/Makefile +index 10c1142..2a94df4 100644 --- a/Makefile +++ b/Makefile -@@ -1,58 +1,36 @@ - T=openssl
-+.PHONY: install clean
-
--PREFIX ?=/usr/local
--LIB_OPTION ?= -shared
-+PKGC ?= pkg-config
-
--#Lua auto detect
--LUA_VERSION ?= $(shell pkg-config luajit --print-provides)
--ifeq ($(LUA_VERSION),) ############ Not use luajit
--LUAV ?= $(shell lua -e "_,_,v=string.find(_VERSION,'Lua (.+)');print(v)")
--LUA_CFLAGS ?= -I$(PREFIX)/include/lua$(LUAV)
--LUA_LIBS ?= -L$(PREFIX)/lib
--LUA_LIBDIR ?= $(PREFIX)/lib/lua/$(LUAV)
--else
--LUAV ?= $(shell lua -e "_,_,v=string.find(_VERSION,'Lua (.+)');print(v)")
--LUA_CFLAGS ?= $(shell pkg-config luajit --cflags)
--LUA_LIBS ?= $(shell pkg-config luajit --libs)
--LUA_LIBDIR ?= $(PREFIX)/lib/lua/$(LUAV)
--endif
-+LIB_OPTION = -shared
-
--#OS auto detect
--SYS := $(shell gcc -dumpmachine)
-+# lua's package config can be under various names
-+LUAPKGC := $(shell for pc in lua lua5.1 lua5.2 lua5.3; do \
-+ $(PKGC) --exists $$pc && echo $$pc && break; \
-+ done)
-
--ifneq (, $(findstring linux, $(SYS)))
--# Do linux things
--LDFLAGS = -fPIC -lrt -ldl
--OPENSSL_LIBS ?= $(shell pkg-config openssl --libs)
--OPENSSL_CFLAGS ?= $(shell pkg-config openssl --cflags)
--CFLAGS = -fPIC $(OPENSSL_CFLAGS) $(LUA_CFLAGS)
--endif
--ifneq (, $(findstring apple, $(SYS)))
--# Do darwin things
--LDFLAGS = -fPIC -lrt -ldl
-+BUILD_DIR = $(shell pwd)
-+
-+# LUA include/libraries build flags
-+#LUAV ?= $(shell lua -e "_,_,v=string.find(_VERSION,'Lua (.+)');print(v)")
-+LUA_LIBDIR := $(shell $(PKGC) --variable=libdir $(LUAPKGC))
-+LUA_CFLAGS := $(shell $(PKGC) --cflags $(LUAPKGC))
-+LUA_LIBS := $(shell $(PKGC) --libs-only-L $(LUAPKGC))
-+#LUA_LIBDIR ?= $(PREFIX)/lib/lua/$(LUAV)
-+
-+# openssl include/libraries build flags
-+LDFLAGS += -lrt -ldl $(OPENSSL_LIBS) $(LUA_LIBS)
- OPENSSL_LIBS ?= $(shell pkg-config openssl --libs)
- OPENSSL_CFLAGS ?= $(shell pkg-config openssl --cflags)
--CFLAGS = -fPIC $(OPENSSL_CFLAGS) $(LUA_CFLAGS)
--endif
--ifneq (, $(findstring mingw, $(SYS)))
--# Do mingw things
--V = $(shell lua -e "v=string.gsub('$(LUAV)','%.','');print(v)")
--LDFLAGS = -mwindows -lcrypt32 -lssl -lcrypto -lws2_32 $(PREFIX)/bin/lua$(V).dll
--LUA_CFLAGS = -DLUA_LIB -DLUA_BUILD_AS_DLL -I$(PREFIX)/include/
--CFLAGS = $(OPENSSL_CFLAGS) $(LUA_CFLAGS)
--endif
--ifneq (, $(findstring cygwin, $(SYS)))
--# Do cygwin things
--OPENSSL_LIBS ?= $(shell pkg-config openssl --libs)
--OPENSSL_CFLAGS ?= $(shell pkg-config openssl --cflags)
--CFLAGS = -fPIC $(OPENSSL_CFLAGS) $(LUA_CFLAGS)
--endif
--#custome config
-+# openssl include/libraries build flags
-+LOCAL_INCLUDE = -I$(BUILD_DIR)/deps
-+CFLAGS += -fPIC -DPTHREADS $(LOCAL_INCLUDE) $(OPENSSL_CFLAGS) $(LUA_CFLAGS)
- ifeq (.config, $(wildcard .config))
- include .config
- endif
-
--LIBNAME= $T.so.$V
-+LIBNAME= $T.so
-
- #LIB_OPTION= -bundle -undefined dynamic_lookup #for MacOS X
-
-@@ -68,11 +46,12 @@ OBJS=src/asn1.o src/auxiliar.o src/bio.o - src/ec.o src/engine.o src/hmac.o src/lbn.o src/lhash.o src/misc.o src/ocsp.o src/openssl.o src/ots.o src/pkcs12.o src/pkcs7.o \
- src/pkey.o src/rsa.o src/ssl.o src/th-lock.o src/util.o src/x509.o src/xattrs.o src/xexts.o src/xname.o src/xstore.o src/xalgor.o src/callback.o
-
--.c.o:
-- $(CC) -c -o $@ $?
-+%.o: %.c
-+ $(CC) $(CFLAGS) -c $< -o $@
-
- all: $T.so
- echo $(SYS)
-+ $(CC) $(LDFLAGS) $(LIB_OPTION) $(OBJS) -o $@
-
- $T.so: $(OBJS)
- MACOSX_DEPLOYMENT_TARGET="10.3"; export MACOSX_DEPLOYMENT_TARGET; $(CC) $(CFLAGS) $(LIB_OPTION) -o $T.so $(OBJS) $(OPENSSL_LIBS) $(LUA_LIBS) $(LDFLAGS)
+@@ -1,121 +1,52 @@ + T=openssl ++.PHONY: install clean + +-PREFIX ?=/usr/local +-CC := $(CROSS)$(CC) +-AR := $(CROSS)$(AR) +-LD := $(CROSS)$(LD) ++PKGC ?= pkg-config + +-#OS auto detect +-ifneq (,$(TARGET_SYS)) +- SYS := $(TARGET_SYS) +-else +- SYS := $(shell gcc -dumpmachine) +-endif +- +-#Lua auto detect +-LUA_VERSION := $(shell pkg-config luajit --print-provides) +-ifeq ($(LUA_VERSION),) +- # Not found luajit package, try lua +- LUA_VERSION := $(shell pkg-config lua --print-provides) +- ifeq ($(LUA_VERSION),) +- # Not found lua package, try from prefix +- LUA_VERSION := $(shell lua -e "_,_,v=string.find(_VERSION,'Lua (.+)');print(v)") +- LUA_CFLAGS ?= -I$(PREFIX)/include/lua$(LUA_VERSION) +- LUA_LIBS ?= -L$(PREFIX)/lib -llua +- LUA_LIBDIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) +- else +- # Found lua package +- LUA_VERSION := $(shell lua -e "_,_,v=string.find(_VERSION,'Lua (.+)');print(v)") +- LUA_CFLAGS ?= $(shell pkg-config lua --cflags) +- LUA_LIBS ?= $(shell pkg-config lua --libs) +- LUA_LIBDIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) +- endif +-else +- # Found luajit package +- LUA_VERSION := $(shell luajit -e "_,_,v=string.find(_VERSION,'Lua (.+)');print(v)") +- LUA_CFLAGS ?= $(shell pkg-config luajit --cflags) +- LUA_LIBS ?= $(shell pkg-config luajit --libs) +- LUA_LIBDIR ?= $(PREFIX)/lib/lua/$(LUA_VERSION) +-endif +- +-#OpenSSL auto detect +-OPENSSL_CFLAGS ?= $(shell pkg-config openssl --cflags) +-OPENSSL_LIBS ?= $(shell pkg-config openssl --static --libs) +- +-ifneq (, $(findstring linux, $(SYS))) +- # Do linux things +- CFLAGS = -fpic +- LDFLAGS = -Wl,--no-undefined -fpic -lrt -ldl -lm +-endif +- +-ifneq (, $(findstring apple, $(SYS))) +- # Do darwin things +- CFLAGS = -fPIC +- LDFLAGS = -fPIC -undefined dynamic_lookup -ldl +- #MACOSX_DEPLOYMENT_TARGET="10.3" +- CC := MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET} $(CC) +-endif ++LIB_OPTION = -shared ++LUAPKGC = lua + +-ifneq (, $(findstring mingw, $(SYS))) +- # Do mingw things +- CFLAGS = -DLUA_LIB -DLUA_BUILD_AS_DLL -DWIN32_LEAN_AND_MEAN +-endif +- +-ifneq (, $(findstring cygwin, $(SYS))) +- # Do cygwin things +- CFLAGS = -fPIC +-endif ++# LUA include/libraries build flags ++LUA_CFLAGS := $(shell $(PKGC) --cflags $(LUAPKGC)) ++LUA_LIBS_DIR := $(shell $(PKGC) --libs $(LUAPKGC)) + +-ifneq (, $(findstring iOS, $(SYS))) +- # Do iOS things +- CFLAGS = -fPIC +- LDFLAGS = -fPIC -ldl +-endif ++# openssl include/libraries build flags ++OPENSSL_CFLAGS ?= $(shell $(PKGC) openssl --cflags) ++OPENSSL_LIBS_DIR ?= $(shell $(PKGC) openssl --libs) + +-#custom config ++LDFLAGS = -Wl,--no-undefined -fpic -lrt -ldl -lm $(OPENSSL_LIBS_DIR) $(LUA_LIBS_DIR) ++CFLAGS += -fPIC -DPTHREADS $(OPENSSL_CFLAGS) $(LUA_CFLAGS) + ifeq (.config, $(wildcard .config)) +- include .config ++include .config + endif + +-LIBNAME= $T.so.$V ++LIBNAME= $T.so + +-CFLAGS += $(OPENSSL_CFLAGS) $(LUA_CFLAGS) $(TARGET_FLAGS) +-LDFLAGS += -shared $(OPENSSL_LIBS) $(LUA_LIBS) + # Compilation directives + WARN_MIN = -Wall -Wno-unused-value + WARN = -Wall + WARN_MOST = $(WARN) -W -Waggregate-return -Wcast-align -Wmissing-prototypes -Wnested-externs -Wshadow -Wwrite-strings -pedantic + CFLAGS += -g $(WARN_MIN) -DPTHREADS -Ideps -Ideps/lua-compat -Ideps/auxiliar + +- + OBJS=src/asn1.o deps/auxiliar/auxiliar.o src/bio.o src/cipher.o src/cms.o src/compat.o src/crl.o src/csr.o src/dh.o src/digest.o src/dsa.o \ + src/ec.o src/engine.o src/hmac.o src/lbn.o src/lhash.o src/misc.o src/ocsp.o src/openssl.o src/ots.o src/pkcs12.o src/pkcs7.o \ + src/pkey.o src/rsa.o src/ssl.o src/th-lock.o 
src/util.o src/x509.o src/xattrs.o src/xexts.o src/xname.o src/xstore.o \ + src/xalgor.o src/callback.o src/srp.o deps/auxiliar/subsidiar.o + +-.c.o: +- $(CC) $(CFLAGS) -c -o $@ $? ++%.o: %.c ++ $(CC) $(CFLAGS) -c $< -o $@ + + all: $T.so +- @echo "Target system: "$(SYS) +- +-$T.so: lib$T.a +- $(CC) -o $@ src/openssl.o -L. -l$T $(LDFLAGS) ++ echo $(SYS) ++ $(CC) $(LDFLAGS) $(LIB_OPTION) $(OBJS) -o $@ + +-lib$T.a: $(OBJS) +- $(AR) rcs $@ $? ++$T.so: $(OBJS) ++ echo "LUA LIBS_DIR: " $(LUA_LIBS_DIR) ++ $(CC) $(CFLAGS) $(LIB_OPTION) -o $T.so $(OBJS) $(LDFLAGS) + + install: all + mkdir -p $(LUA_LIBDIR) + cp $T.so $(LUA_LIBDIR) + +-info: +- @echo "Target system: "$(SYS) +- @echo "CC:" $(CC) +- @echo "AR:" $(AR) +- @echo "PREFIX:" $(PREFIX) +- + clean: +- rm -f $T.so lib$T.a $(OBJS) +- +-# vim: ts=8 sw=8 noet ++ rm -f $T.so $(OBJS) diff --git a/lang/lua-openssl/patches/0010-Fix-Linkage.patch b/lang/lua-openssl/patches/0010-Fix-Linkage.patch deleted file mode 100644 index 52bcf1ae8..000000000 --- a/lang/lua-openssl/patches/0010-Fix-Linkage.patch +++ /dev/null @@ -1,36 +0,0 @@ -diff --git a/deps/lua-compat/c-api/compat-5.3.c b/deps/lua-compat/c-api/compat-5.3.c -index 4395bbc..4a8877c 100644 ---- a/deps/lua-compat/c-api/compat-5.3.c -+++ b/deps/lua-compat/c-api/compat-5.3.c -@@ -491,17 +491,6 @@ COMPAT53_API int lua_geti (lua_State *L, int index, lua_Integer i) { - } - - --COMPAT53_API int lua_isinteger (lua_State *L, int index) { -- if (lua_type(L, index) == LUA_TNUMBER) { -- lua_Number n = lua_tonumber(L, index); -- lua_Integer i = lua_tointeger(L, index); -- if (i == n) -- return 1; -- } -- return 0; --} -- -- - static void compat53_reverse (lua_State *L, int a, int b) { - for (; a < b; ++a, --b) { - lua_pushvalue(L, a); -diff --git a/deps/lua-compat/c-api/compat-5.3.h b/deps/lua-compat/c-api/compat-5.3.h -index 2309294..8e67bae 100644 ---- a/deps/lua-compat/c-api/compat-5.3.h -+++ b/deps/lua-compat/c-api/compat-5.3.h -@@ -244,9 +244,6 @@ typedef int (*lua_KFunction)(lua_State *L, int status, lua_KContext ctx); - #define lua_geti COMPAT53_CONCAT(COMPAT53_PREFIX, _geti) - COMPAT53_API int lua_geti (lua_State *L, int index, lua_Integer i); - --#define lua_isinteger COMPAT53_CONCAT(COMPAT53_PREFIX, _isinteger) --COMPAT53_API int lua_isinteger (lua_State *L, int index); -- - #define lua_numbertointeger(n, p) \ - ((*(p) = (lua_Integer)(n)), 1) - diff --git a/lang/lua-openssl/patches/0020-use-X509_REQ_to_X509-in-openssl-1.1.patch b/lang/lua-openssl/patches/0020-use-X509_REQ_to_X509-in-openssl-1.1.patch deleted file mode 100644 index 930b37e59..000000000 --- a/lang/lua-openssl/patches/0020-use-X509_REQ_to_X509-in-openssl-1.1.patch +++ /dev/null @@ -1,31 +0,0 @@ ---- a/src/csr.c -+++ b/src/csr.c -@@ -38,7 +38,7 @@ static LUA_FUNCTION(openssl_csr_read) - return openssl_pushresult(L, 0); - } -
--
-+#if OPENSSL_VERSION_NUMBER < 0x10100000L
- static X509 *X509_REQ_to_X509_ex(X509_REQ *r, int days, EVP_PKEY *pkey, const EVP_MD* md)
- {
- X509 *ret = NULL;
-@@ -91,14 +91,19 @@ static X509 *X509_REQ_to_X509_ex(X509_RE - }
- return (ret);
- }
-+#endif
- - static LUA_FUNCTION(openssl_csr_to_x509) - { - X509_REQ * csr = CHECK_OBJECT(1, X509_REQ, "openssl.x509_req"); - EVP_PKEY * pkey = CHECK_OBJECT(2, EVP_PKEY, "openssl.evp_pkey"); - int days = luaL_optint(L, 3, 365); -+#if OPENSSL_VERSION_NUMBER < 0x10100000L - const EVP_MD* md = get_digest(L, 4, "sha256"); - X509* cert = X509_REQ_to_X509_ex(csr, days, pkey, md); -+#else -+ X509* cert = X509_REQ_to_X509(csr, days, pkey); -+#endif - if (cert) - { - PUSH_OBJECT(cert, "openssl.x509"); diff --git a/lang/luaposix/Makefile b/lang/luaposix/Makefile index 92ff86d0e..f462d18da 100644 --- a/lang/luaposix/Makefile +++ b/lang/luaposix/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=luaposix PKG_VERSION:=v33.2.1 -PKG_RELEASE:=5 +PKG_RELEASE:=6 PKG_SOURCE:=release-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://github.com/luaposix/luaposix/archive/ @@ -49,13 +49,6 @@ ifeq ($(CONFIG_USE_MUSL),y) TARGET_CFLAGS += -D_XOPEN_REALTIME=1 endif -ifneq ($(CONFIG_USE_GLIBC),) - ifeq ($(CONFIG_EGLIBC_OPTION_EGLIBC_UTMP),) - TARGET_CFLAGS += -DNO_GETLOGIN - endif -endif - - define Package/luaposix/install $(INSTALL_DIR) $(1)/usr/lib/lua/posix $(INSTALL_BIN) $(PKG_BUILD_DIR)/ext/posix/.libs/posix.so $(1)/usr/lib/lua diff --git a/lang/luaposix/patches/100-eglibc-compat.patch b/lang/luaposix/patches/100-eglibc-compat.patch deleted file mode 100644 index e4df63bfc..000000000 --- a/lang/luaposix/patches/100-eglibc-compat.patch +++ /dev/null @@ -1,30 +0,0 @@ -diff --git a/ext/posix/unistd.c b/ext/posix/unistd.c -index 9276640..69c8cef 100644 ---- a/ext/posix/unistd.c -+++ b/ext/posix/unistd.c -@@ -525,6 +525,7 @@ Pgetgroups(lua_State *L) - #endif - - -+#ifndef NO_GETLOGIN - /*** - Current logged-in user. - @treturn[1] string username, if successful -@@ -537,6 +538,7 @@ Pgetlogin(lua_State *L) - checknargs(L, 0); - return pushstringresult(getlogin()); - } -+#endif - - - /*** -@@ -1044,7 +1046,9 @@ static const luaL_Reg posix_unistd_fns[] = - LPOSIX_FUNC( Pgetegid ), - LPOSIX_FUNC( Pgeteuid ), - LPOSIX_FUNC( Pgetgid ), -+#ifndef NO_GETLOGIN - LPOSIX_FUNC( Pgetlogin ), -+#endif - LPOSIX_FUNC( Pgetpgrp ), - LPOSIX_FUNC( Pgetpid ), - LPOSIX_FUNC( Pgetppid ), diff --git a/lang/luasocket/Makefile b/lang/luasocket/Makefile index 9264788f3..b44636e47 100644 --- a/lang/luasocket/Makefile +++ b/lang/luasocket/Makefile @@ -8,33 +8,58 @@ include $(TOPDIR)/rules.mk PKG_NAME:=luasocket -PKG_SOURCE_VERSION:=6d5e40c324c84d9c1453ae88e0ad5bdd0a631448 -PKG_VERSION:=3.0-rc1-20130909 -PKG_RELEASE:=5 +PKG_SOURCE_DATE:=2019-04-21 +PKG_SOURCE_VERSION:=733af884f1aa18ff469bf3c4d18810e815853211 +PKG_RELEASE:=1 -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 -PKG_MIRROR_HASH:=d2fa075d8bd026c41e0eb1a634ac2ad8115dee8abb070720e8e91fab51f86ee4 -PKG_SOURCE_URL:=https://github.com/diegonehab/luasocket.git PKG_SOURCE_PROTO:=git -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) +PKG_SOURCE_URL:=https://github.com/diegonehab/luasocket +PKG_MIRROR_HASH:=60aef7544426cae3e6c7560a6e4ad556a04b879ca0ad0311645b2c513c872128 + +PKG_MAINTAINER:=W. Michael Petullo <mike@flyn.org> +PKG_LICENSE:=MIT +PKG_LICENSE_FILES:=LICENSE include $(INCLUDE_DIR)/package.mk -define Package/luasocket +define Package/luasocket/default SUBMENU:=Lua SECTION:=lang CATEGORY:=Languages + URL:=http://w3.impa.br/~diego/software/luasocket +endef + +define Package/luasocket + $(Package/luasocket/default) TITLE:=LuaSocket - URL:=http://luasocket.luaforge.net/ - MAINTAINER:=W. 
Michael Petullo <mike@flyn.org> DEPENDS:=+lua + VARIANT:=lua-51 + DEFAULT_VARIANT:=1 +endef + +define Package/luasocket5.3 + $(Package/luasocket/default) + TITLE:=LuaSocket 5.3 + DEPENDS:=+liblua5.3 + VARIANT:=lua-53 endef -define Package/luasocket/description +ifeq ($(BUILD_VARIANT),lua-51) + LUA_VERSION=5.1 +endif + +ifeq ($(BUILD_VARIANT),lua-53) + LUA_VERSION=5.3 +endif + + +define Package/luasocket/default/description LuaSocket is the most comprehensive networking support library for the Lua language. It provides easy access to TCP, UDP, DNS, SMTP, FTP, HTTP, MIME and much more. endef +Package/luasocket/description = $(Package/luasocket/default/description) +Package/luasocket5.3/description = $(Package/luasocket/default/description) define Build/Configure endef @@ -44,21 +69,31 @@ define Build/Compile LIBDIR="$(TARGET_LDFLAGS)" \ CC="$(TARGET_CC) $(TARGET_CFLAGS) $(TARGET_CPPFLAGS) $(FPIC)" \ LD="$(TARGET_CROSS)ld -shared" \ + LUAV=$(LUA_VERSION) LUAINC_linux_base=$(STAGING_DIR)/usr/include \ all endef - define Package/luasocket/install $(INSTALL_DIR) $(1)/usr/lib/lua $(INSTALL_DATA) $(PKG_BUILD_DIR)/src/{ltn12,mime,socket}.lua $(1)/usr/lib/lua - $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/mime.so.1.0.3 $(1)/usr/lib/lua - $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/socket.so.3.0-rc1 $(1)/usr/lib/lua + $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/mime-1.0.3.so $(1)/usr/lib/lua + $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/socket-3.0-rc1.so $(1)/usr/lib/lua $(INSTALL_DIR) $(1)/usr/lib/lua/mime - ln -sf ../mime.so.1.0.3 $(1)/usr/lib/lua/mime/core.so + ln -sf ../mime-1.0.3.so $(1)/usr/lib/lua/mime/core.so $(INSTALL_DIR) $(1)/usr/lib/lua/socket $(INSTALL_DATA) $(PKG_BUILD_DIR)/src/{ftp,http,smtp,tp,url,headers}.lua $(1)/usr/lib/lua/socket $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/unix.so $(1)/usr/lib/lua/socket - ln -sf ../socket.so.3.0-rc1 $(1)/usr/lib/lua/socket/core.so + ln -sf ../socket-3.0-rc1.so $(1)/usr/lib/lua/socket/core.so endef + +define Package/luasocket5.3/install + $(MAKE) -C $(PKG_BUILD_DIR)/src \ + DESTDIR="$(1)" \ + LUAV=$(LUA_VERSION) \ + install +endef + + $(eval $(call BuildPackage,luasocket)) +$(eval $(call BuildPackage,luasocket5.3)) diff --git a/lang/luasocket/patches/0001-Add-interface-support.patch b/lang/luasocket/patches/0001-Add-interface-support.patch index 3be42ed56..00c279969 100644 --- a/lang/luasocket/patches/0001-Add-interface-support.patch +++ b/lang/luasocket/patches/0001-Add-interface-support.patch @@ -18,7 +18,7 @@ new file mode 100644 index 0000000..db231aa --- /dev/null +++ b/src/if.c -@@ -0,0 +1,113 @@ +@@ -0,0 +1,117 @@ +/* + * $Id: if.c $ + * @@ -55,7 +55,11 @@ index 0000000..db231aa +{ + lua_pushstring(L, "iface"); + lua_newtable(L); ++#if LUA_VERSION_NUM < 503 + luaL_openlib(L, NULL, func, 0); ++#else ++ luaL_setfuncs(L, func, 0); ++#endif + lua_settable(L, -3); + return 0; +} @@ -169,7 +173,7 @@ diff --git a/src/luasocket.c b/src/luasocket.c index e6ee747..85d41a6 100644 --- a/src/luasocket.c +++ b/src/luasocket.c -@@ -31,6 +31,7 @@ +@@ -21,6 +21,7 @@ #include "tcp.h" #include "udp.h" #include "select.h" @@ -177,7 +181,7 @@ index e6ee747..85d41a6 100644 /*-------------------------------------------------------------------------*\ * Internal function prototypes -@@ -51,6 +52,7 @@ static const luaL_Reg mod[] = { +@@ -41,6 +42,7 @@ static const luaL_Reg mod[] = { {"tcp", tcp_open}, {"udp", udp_open}, {"select", select_open}, @@ -189,15 +193,16 @@ diff --git a/src/makefile b/src/makefile index 8d3521e..09d4882 100644 --- a/src/makefile +++ b/src/makefile -@@ -262,6 +262,7 @@ 
SOCKET_OBJS= \ - auxiliar.$(O) \ + +@@ -303,6 +303,7 @@ SOCKET_OBJS= \ + compat.$(O) \ options.$(O) \ inet.$(O) \ + if.$(O) \ $(SOCKET) \ except.$(O) \ select.$(O) \ -@@ -387,6 +388,7 @@ auxiliar.$(O): auxiliar.c auxiliar.h +@@ -440,6 +441,7 @@ auxiliar.$(O): auxiliar.c auxiliar.h buffer.$(O): buffer.c buffer.h io.h timeout.h except.$(O): except.c except.h inet.$(O): inet.c inet.h socket.h io.h timeout.h usocket.h @@ -209,17 +214,19 @@ diff --git a/src/options.c b/src/options.c index 8ac2a14..1c73e6f 100644 --- a/src/options.c +++ b/src/options.c -@@ -3,6 +3,9 @@ - * LuaSocket toolkit - \*=========================================================================*/ - #include <string.h> +@@ -7,7 +7,10 @@ + #include "options.h" + #include "inet.h" + #include <string.h> +- +#include <sys/types.h> +#include <sys/socket.h> +#include <net/if.h> - - #include "lauxlib.h" - -@@ -285,6 +288,12 @@ static int opt_ip6_setmembership(lua_State *L, p_socket ps, int level, int name) ++ + /*=========================================================================*\ + * Internal functions prototypes + \*=========================================================================*/ +@@ -388,6 +391,12 @@ static int opt_ip6_setmembership(lua_Sta if (!lua_isnil(L, -1)) { if (lua_isnumber(L, -1)) { val.ipv6mr_interface = (unsigned int) lua_tonumber(L, -1); @@ -232,6 +239,5 @@ index 8ac2a14..1c73e6f 100644 } else luaL_argerror(L, -1, "number 'interface' field expected"); } --- +-- 1.8.4.rc3 - diff --git a/lang/luasocket/patches/0301-Fix-mpc85xx-build.patch b/lang/luasocket/patches/0301-Fix-mpc85xx-build.patch index c3bf949c5..c759da100 100644 --- a/lang/luasocket/patches/0301-Fix-mpc85xx-build.patch +++ b/lang/luasocket/patches/0301-Fix-mpc85xx-build.patch @@ -1,6 +1,6 @@ --- a/src/makefile +++ b/src/makefile -@@ -345,18 +345,18 @@ none: +@@ -397,18 +398,18 @@ none: all: $(SOCKET_SO) $(MIME_SO) $(SOCKET_SO): $(SOCKET_OBJS) @@ -21,5 +21,5 @@ - $(LD) $(SERIAL_OBJS) $(LDFLAGS)$@ + $(CC) $(SERIAL_OBJS) $(LDFLAGS)$@ - install: + install: $(INSTALL_DIR) $(INSTALL_TOP_LDIR) diff --git a/lang/luasocket/patches/040-remove-fpic-and-warnings.patch b/lang/luasocket/patches/040-remove-fpic-and-warnings.patch index 95c17b143..647997aab 100644 --- a/lang/luasocket/patches/040-remove-fpic-and-warnings.patch +++ b/lang/luasocket/patches/040-remove-fpic-and-warnings.patch @@ -1,14 +1,14 @@ --- a/src/makefile +++ b/src/makefile -@@ -163,9 +163,8 @@ DEF_linux=-DLUASOCKET_$(DEBUG) -DLUA_$(COMPAT)_MODULE \ - -DLUASOCKET_API='__attribute__((visibility("default")))' \ - -DUNIX_API='__attribute__((visibility("default")))' \ - -DMIME_API='__attribute__((visibility("default")))' --CFLAGS_linux= -I$(LUAINC) $(DEF) -pedantic -Wall -Wshadow -Wextra \ -- -Wimplicit -O2 -ggdb3 -fpic -fvisibility=hidden +@@ -174,9 +174,8 @@ SO_linux=so + O_linux=o + CC_linux=gcc + DEF_linux=-DLUASOCKET_$(DEBUG) +-CFLAGS_linux=$(LUAINC:%=-I%) $(DEF) -Wall -Wshadow -Wextra \ +- -Wimplicit -O2 -ggdb3 -fpic -LDFLAGS_linux=-O -shared -fpic -o -+CFLAGS_linux= -I$(LUAINC) $(DEF) -fvisibility=hidden -+LDFLAGS_linux=-shared -o ++CFLAGS_linux=$(LUAINC:%=-I%) $(DEF) -O2 ++LDFLAGS_linux=-O -shared -o LD_linux=gcc SOCKET_linux=usocket.o diff --git a/lang/node-mozilla-iot-gateway/Config.in b/lang/node-mozilla-iot-gateway/Config.in deleted file mode 100644 index b7cf49748..000000000 --- a/lang/node-mozilla-iot-gateway/Config.in +++ /dev/null @@ -1,9 +0,0 @@ -if PACKAGE_node-mozilla-iot-gateway - - comment "Optional features" - - config MOIT_enable-plugin-support - bool "Enable 
packages needed for some plugins" - default y - -endif diff --git a/lang/node-mozilla-iot-gateway/Makefile b/lang/node-mozilla-iot-gateway/Makefile deleted file mode 100644 index 558e55f9d..000000000 --- a/lang/node-mozilla-iot-gateway/Makefile +++ /dev/null @@ -1,76 +0,0 @@ -# -# Copyright (C) 2018 Sartura Ltd. -# -# This is free software, licensed under the GNU General Public License v2. -# See /LICENSE for more information. -# - -include $(TOPDIR)/rules.mk - -PKG_NPM_NAME:=mozilla-iot-gateway -PKG_NAME:=node-$(PKG_NPM_NAME) -PKG_VERSION:=0.8.1 -PKG_RELEASE:=2 - -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_SOURCE_URL:=https://codeload.github.com/mozilla-iot/gateway/tar.gz/v$(PKG_VERSION)? -PKG_HASH:=f407732b9c1d020aa79e9d0b12f1b97e82691d6f58def2df067f790f4f640e30 - -PKG_BUILD_DEPENDS:=node/host openzwave - -PKG_MAINTAINER:=Marko Ratkaj <marko.ratkaj@sartura.hr> -PKG_LICENSE:=MPL-2.0 -PKG_LICENSE_FILES:=LICENSE - -include $(INCLUDE_DIR)/package.mk - -define Package/node-mozilla-iot-gateway - SUBMENU:=Node.js - SECTION:=lang - CATEGORY:=Languages - TITLE:=WebThings Gateway by Mozilla - URL:=https://iot.mozilla.org/gateway/ - DEPENDS:= +libpthread +node +node-npm +libopenzwave +openzwave-config +python +python3-light +python3-pip +openssl-util - DEPENDS+= +MOIT_enable-plugin-support:git-http - MENU:=1 -endef - -define Package/node-mozilla-iot-gateway/description - Build Your Own Web of Things Gateway. The "Web of Things" (WoT) is the - idea of taking the lessons learned from the World Wide Web and applying - them to IoT. It's about creating a decentralized Internet of Things by - giving Things URLs on the web to make them linkable and discoverable, - and defining a standard data model and APIs to make them interoperable. -endef - -define Package/node-mozilla-iot-gateway/config - source "$(SOURCE)/Config.in" -endef - -CPU:=$(subst powerpc,ppc,$(subst aarch64,arm64,$(subst x86_64,x64,$(subst i386,ia32,$(ARCH))))) - -TARGET_CFLAGS+=$(FPIC) - -define Build/Compile - $(MAKE_VARS) \ - $(MAKE_FLAGS) \ - npm_config_arch=$(CONFIG_ARCH) \ - npm_config_nodedir=$(STAGING_DIR)/usr/ \ - npm_config_cache=$(TMP_DIR)/npm-cache \ - npm_config_tmp=$(TMP_DIR)/npm-tmp \ - PREFIX="$(PKG_INSTALL_DIR)/usr/" \ - $(STAGING_DIR_HOSTPKG)/bin/npm install --build-from-source --target_arch=$(CPU) -g $(DL_DIR)/$(PKG_SOURCE) -endef - -define Package/node-mozilla-iot-gateway/install - $(INSTALL_DIR) $(1)/opt/mozilla-iot/gateway/ - $(CP) $(PKG_INSTALL_DIR)/usr/lib/node_modules/webthings-gateway/* $(1)/opt/mozilla-iot/gateway - $(MAKE_VARS) \ - $(MAKE_FLAGS) \ - $(STAGING_DIR_HOSTPKG)/bin/npm --prefix=$(1)/opt/mozilla-iot/gateway install \ - --build-from-source --target_arch=$(CPU) $(1)/opt/mozilla-iot/gateway - $(INSTALL_DIR) $(1)/etc/init.d - $(INSTALL_BIN) ./files/mozilla-iot-gateway.init $(1)/etc/init.d/mozilla-iot-gateway -endef - -$(eval $(call BuildPackage,node-mozilla-iot-gateway)) diff --git a/lang/node-mozilla-iot-gateway/README.md b/lang/node-mozilla-iot-gateway/README.md deleted file mode 100644 index b1d1afd8c..000000000 --- a/lang/node-mozilla-iot-gateway/README.md +++ /dev/null @@ -1,150 +0,0 @@ -# Things Gateway by Mozilla - -Build Your Own Web of Things Gateway. The "Web of Things" (WoT) is the idea of -taking the lessons learned from the World Wide Web and applying them to IoT. -It's about creating a decentralized Internet of Things by giving Things URLs on -the web to make them linkable and discoverable, and defining a standard data -model and APIs to make them interoperable. 
- -### Getting Started - -These instructions will get you a copy of OpenWrt's build system on your local -machine for development and testing purposes. To check the prerequisites for -your system check out this -[link](https://openwrt.org/docs/guide-developer/build-system/install-buildsystem). - -``` -git clone https://github.com/openwrt/openwrt -cd openwrt -``` - -### Configure the build system - -We need to configure the build system and select the Things Gateway package. -This process is no different from selecting other OpenWrt packages. For this -example we will be using build configuration for Raspberry Pi 2/3. - -Update feeds and open menuconfig interface: - -``` -make package/symlinks -make menuconfig -``` - -Select your target: - -``` -Target System (Broadcom BCM27xx) ---> -Subtarget (BCM2709/BCM2710 32 bit based boards) ---> -Target Profile (Raspberry Pi 2B/3B/3B+/3CM) ---> -``` - -Things Gateway package is a bit beefy. In order to fit the image, extend the -filesystem size from 256 to 1024 MB: - -``` -Target Images ---> - (1024) Root filesystem partition size (in MB) -``` - -Select Things Gateway package: - -``` -Languages ---> - Node.js ---> - <*> node-mozilla-iot-gateway -``` - -Save and exit. - - -### Building the image - -Run the build process and substitute <N> with the number of your CPU cores: - -``` -make -j<N> -``` - - -### Flashing on the SD card - -Process of flashing the image will depend on which device you have. -Instructions below are for Raspberry Pi 2/3. For other devices consult OpenWrt -wiki pages. Be careful to replace the X in the third command with the drive -letter of your SD card. - -``` -cd bin/targets/brcm2708/bcm2709 -gunzip openwrt-brcm2708-bcm2709-rpi-2-ext4-factory.img.gz -sudo dd if=openwrt-brcm2708-bcm2709-rpi-2-ext4-factory.img of=/dev/sdX conv=fsync -``` - -## Running Things Gateway from USB flash drive - -In case the device doesn't have enough internal storage space, it is possible -to run Things Gateway of a USB flash drive. This requires USB flash drive with -ext4 filesystem plugged in the device. - -### Configuration - -Do all steps from "Configure the build system" above, and after that change -node-mozilla-iot-gateway selection from "\*" to "M". This will build the -package and all of it's dependencies but it will not install Things Gateway. - -``` -Languages ---> - Node.js ---> - <M> node-mozilla-iot-gateway -``` - -### Prepare the device - -We need to auto mount the USB flash drive in order for the gateway to start at -boot. To do so, open a console on your embedded device and create a /etc/fstab -file with the following contents. This assumes your USB flash drive is -/dev/sda1: - -``` -/dev/sda1 /opt ext4 rw,relatime,data=ordered 0 1 -/opt/root /root none defaults,bind 0 0 -``` - -Add "mount -a" to the end of the "boot" function in /etc/init.d/boot - -``` -boot() { - . - . - . - /bin/config_generate - uci_apply_defaults - - # temporary hack until configd exists - /sbin/reload_config - - # Added by us - mount -a -} -``` - -### Install Things Gateway package - -After successfully mounting the USB drive, transfer the .ipk file from your -local machine to the device and install it. Note that your package version -might defer. Also note that location of .ipk file depends on the selected -target, but it will be within ./bin/packages directory. We need to use -"--force-space" or else opkg might complain about insufficient space. 
- -On your local machine: -``` -cd bin/packages/arm_cortex-a9_vfpv3/packages/ -scp node-mozilla-iot-gateway_0.6.0-1_arm_cortex-a9_vfpv3.ipk root@192.168.1.1:/tmp -``` - -On the device: -``` -opkg --force-space install /tmp/node-mozilla-iot-gateway_0.6.0-1_arm_cortex-a9_vfpv3.ipk -``` - -Things Gateway should now start at every boot. diff --git a/lang/node-mozilla-iot-gateway/files/mozilla-iot-gateway.init b/lang/node-mozilla-iot-gateway/files/mozilla-iot-gateway.init deleted file mode 100644 index 8ed67fcac..000000000 --- a/lang/node-mozilla-iot-gateway/files/mozilla-iot-gateway.init +++ /dev/null @@ -1,21 +0,0 @@ -#!/bin/sh /etc/rc.common - -START=99 - -USE_PROCD=1 - -HOME=/root -MOZIOT_HOME="${HOME}/.mozilla-iot" -export PATH="/opt/mozilla-iot/gateway/tools:${PATH}" - -start_service() -{ - mkdir -p /usr/etc/ - ln -sf /etc/openzwave /usr/etc/openzwave - - procd_open_instance mozilla-iot-gateway - procd_set_param command /usr/bin/npm start --prefix /opt/mozilla-iot/gateway - procd_set_param stdout 1 - procd_set_param stderr 1 - procd_close_instance -} diff --git a/lang/node/Makefile b/lang/node/Makefile index 71b023bd0..3ddd9b79d 100644 --- a/lang/node/Makefile +++ b/lang/node/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=node PKG_VERSION:=v8.16.0 -PKG_RELEASE:=2 +PKG_RELEASE:=3 PKG_SOURCE:=node-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=https://nodejs.org/dist/${PKG_VERSION} PKG_HASH:=3515e8e01568a5dc4dff3d91a76ebc6724f5fa2fbb58b4b0c5da7b178a2f7340 @@ -103,7 +103,7 @@ CONFIGURE_ARGS:= \ HOST_CONFIGURE_VARS:= HOST_CONFIGURE_ARGS:= \ - --dest-os=linux \ + --dest-os=$(if $(findstring Darwin,$(HOST_OS)),mac,linux) \ --without-snapshot \ --prefix=$(STAGING_DIR_HOSTPKG) diff --git a/lang/node/patches/007-fix_host_build_on_macos.patch b/lang/node/patches/007-fix_host_build_on_macos.patch new file mode 100644 index 000000000..34f6ec8b9 --- /dev/null +++ b/lang/node/patches/007-fix_host_build_on_macos.patch @@ -0,0 +1,11 @@ +--- a/tools/gyp/pylib/gyp/generator/make.py ++++ b/tools/gyp/pylib/gyp/generator/make.py +@@ -174,7 +174,7 @@ + + LINK_COMMANDS_MAC = """\ + quiet_cmd_alink = LIBTOOL-STATIC $@ +-cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^) ++cmd_alink = rm -f $@ && ./gyp-mac-tool filter-libtool /usr/bin/libtool $(GYP_LIBTOOLFLAGS) -static -o $@ $(filter %.o,$^) + + quiet_cmd_link = LINK($(TOOLSET)) $@ + cmd_link = $(LINK.$(TOOLSET)) $(GYP_LDFLAGS) $(LDFLAGS.$(TOOLSET)) -o "$@" $(LD_INPUTS) $(LIBS) diff --git a/lang/perl-device-usb/Makefile b/lang/perl-device-usb/Makefile index ac936ba12..fdee5cc8b 100644 --- a/lang/perl-device-usb/Makefile +++ b/lang/perl-device-usb/Makefile @@ -8,15 +8,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=perl-device-usb -PKG_VERSION:=0.37 +PKG_VERSION:=0.38 PKG_RELEASE:=1 PKG_SOURCE_URL:=https://www.cpan.org/authors/id/G/GW/GWADEJ/ PKG_SOURCE:=Device-USB-$(PKG_VERSION).tar.gz -PKG_HASH:=0dd58d9c627b3c539d07263a7b96e1de2adea3a4ddb118cdd45aa638f8702e49 +PKG_HASH:=f0c36379db4913406dc9bd546a6cfd4da20d58f7c2cc085893a2fe67a43465dc -PKG_LICENSE:=GPL-1.0+ Artistic-1.0-Perl PKG_MAINTAINER:=Marcel Denia <naoir@gmx.net> +PKG_LICENSE:=GPL-1.0-or-later Artistic-1.0-Perl PKG_BUILD_DIR:=$(BUILD_DIR)/perl/Device-USB-$(PKG_VERSION) PKG_BUILD_DEPENDS:=perl-inline-c/host diff --git a/lang/perl-device-usb/patches/140-avoid-libusb-name-conflicts.patch b/lang/perl-device-usb/patches/140-avoid-libusb-name-conflicts.patch index 552306ec4..9ce0ba320 100644 --- 
a/lang/perl-device-usb/patches/140-avoid-libusb-name-conflicts.patch +++ b/lang/perl-device-usb/patches/140-avoid-libusb-name-conflicts.patch @@ -3,12 +3,12 @@ @@ -15,6 +15,7 @@ use Inline ( ($ENV{LIBUSB_INCDIR} ? ( INC => "-I\"$ENV{LIBUSB_INCDIR}\"" ) : () ), NAME => 'Device::USB', - VERSION => '0.37', + VERSION => '0.38', + PREFIX => 'deviceusb_', ); Inline->init(); -@@ -540,74 +541,74 @@ unsigned DeviceUSBDebugLevel() +@@ -657,74 +658,74 @@ unsigned DeviceUSBDebugLevel() return debugLevel; } @@ -98,7 +98,7 @@ } #if LIBUSB_HAS_GET_DRIVER_NP ret = usb_get_driver_np((usb_dev_handle *)dev, interface, name, namelen); -@@ -618,11 +619,11 @@ int libusb_get_driver_np(void *dev, int +@@ -735,11 +736,11 @@ int libusb_get_driver_np(void *dev, int interface, char *name, unsigned int name #endif } @@ -112,7 +112,7 @@ } #if LIBUSB_HAS_DETACH_KERNEL_DRIVER_NP return usb_detach_kernel_driver_np((usb_dev_handle *)dev, interface); -@@ -631,25 +632,25 @@ int libusb_detach_kernel_driver_np(void +@@ -748,25 +749,25 @@ int libusb_detach_kernel_driver_np(void *dev, int interface) #endif } @@ -143,7 +143,7 @@ { int i = 0; int retval = 0; -@@ -658,7 +659,7 @@ void libusb_control_msg(void *dev, int r +@@ -775,7 +776,7 @@ void libusb_control_msg(void *dev, int requesttype, int request, int value, int if(DeviceUSBDebugLevel()) { @@ -152,7 +152,7 @@ requesttype, request, value, index, bytes, size, timeout ); /* maybe need to add support for printing the bytes string. */ -@@ -691,54 +692,54 @@ void libusb_control_msg(void *dev, int r +@@ -808,54 +809,54 @@ void libusb_control_msg(void *dev, int requesttype, int request, int value, int Inline_Stack_Done; } diff --git a/lang/perl-inline-c/Makefile b/lang/perl-inline-c/Makefile index bc5913da7..9de7a9087 100644 --- a/lang/perl-inline-c/Makefile +++ b/lang/perl-inline-c/Makefile @@ -8,15 +8,16 @@ include $(TOPDIR)/rules.mk PKG_NAME:=perl-inline-c -PKG_VERSION:=0.78 +PKG_VERSION:=0.81 PKG_RELEASE:=1 -PKG_SOURCE_URL:=http://www.cpan.org/authors/id/T/TI/TINITA PKG_SOURCE:=Inline-C-$(PKG_VERSION).tar.gz -PKG_HASH:=9a7804d85c01a386073d2176582b0262b6374c5c0341049da3ef84c6f53efbc7 +PKG_SOURCE_URL:=https://www.cpan.org/authors/id/T/TI/TINITA +PKG_HASH:=f185258d9050d7f79b4f00f12625cc469c2f700ff62d3e831cb18d80d2c87aac -PKG_LICENSE:=GPL-1.0+ Artistic-1.0-Perl PKG_MAINTAINER:=Marcel Denia <naoir@gmx.net> +PKG_LICENSE:=GPL-1.0-or-later Artistic-1.0-Perl +PKG_LICENSE_FILES:=LICENSE HOST_BUILD_DIR:=$(BUILD_DIR_HOST)/perl/Inline-C-$(PKG_VERSION) HOST_BUILD_DEPENDS:=perl/host perl-inline/host perl-parse-recdescent/host perl-file-sharedir-install/host @@ -32,7 +33,7 @@ define Package/perl-inline-c SECTION:=lang CATEGORY:=Languages TITLE:=C Language Support for Inline - URL:=http://search.cpan.org/dist/Inline-C/ + URL:=https://search.cpan.org/dist/Inline-C/ DEPENDS:=perl +perl-inline +perl-parse-recdescent +perlbase-config +perlbase-cwd +perlbase-data +perlbase-essential +perlbase-file +perlbase-if endef diff --git a/lang/perl-inline-c/patches/100-inline_c-no_compile_hack.patch b/lang/perl-inline-c/patches/100-inline_c-no_compile_hack.patch index dcfe2bb58..b001c206f 100644 --- a/lang/perl-inline-c/patches/100-inline_c-no_compile_hack.patch +++ b/lang/perl-inline-c/patches/100-inline_c-no_compile_hack.patch @@ -1,6 +1,6 @@ --- a/lib/Inline/C.pm +++ b/lib/Inline/C.pm -@@ -361,7 +361,7 @@ sub build { +@@ -380,7 +380,7 @@ sub build { $o->call('write_XS', 'Build Glue 1'); $o->call('write_Inline_headers', 'Build Glue 2'); $o->call('write_Makefile_PL', 'Build Glue 3'); diff --git 
a/lang/perl-inline-c/patches/110-inline_c-make_system_typemap_overridable.patch b/lang/perl-inline-c/patches/110-inline_c-make_system_typemap_overridable.patch index b2ae09e6b..d26318ab4 100644 --- a/lang/perl-inline-c/patches/110-inline_c-make_system_typemap_overridable.patch +++ b/lang/perl-inline-c/patches/110-inline_c-make_system_typemap_overridable.patch @@ -1,6 +1,6 @@ --- a/lib/Inline/C.pm +++ b/lib/Inline/C.pm -@@ -438,22 +438,28 @@ sub get_maps { +@@ -457,22 +457,28 @@ sub get_maps { print STDERR "get_maps Stage\n" if $o->{CONFIG}{BUILD_NOISY}; my $typemap = ''; my $file; diff --git a/lang/perl-inline/Makefile b/lang/perl-inline/Makefile index d4189b766..427a72d26 100644 --- a/lang/perl-inline/Makefile +++ b/lang/perl-inline/Makefile @@ -8,15 +8,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=perl-inline -PKG_VERSION:=0.82 +PKG_VERSION:=0.83 PKG_RELEASE:=1 PKG_SOURCE:=Inline-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://cpan.metacpan.org/authors/id/T/TI/TINITA -PKG_HASH:=1af94a8e95e4ba4545592341c47d8d1dc45b01822b877f7d3095a438566e874b +PKG_HASH:=171a8e5f66faf88fe5ef4de7126267cd8fb2503483432eabc3b88ac7d5be4239 PKG_MAINTAINER:=Marcel Denia <naoir@gmx.net> -PKG_LICENSE:=GPL-1.0+ Artistic-1.0-Perl +PKG_LICENSE:=GPL-1.0-or-later Artistic-1.0-Perl PKG_LICENSE_FILES:=LICENSE PKG_BUILD_DIR:=$(BUILD_DIR)/perl/Inline-$(PKG_VERSION) diff --git a/lang/php7/Makefile b/lang/php7/Makefile index b51dc065b..b8a2476c5 100644 --- a/lang/php7/Makefile +++ b/lang/php7/Makefile @@ -6,7 +6,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=php -PKG_VERSION:=7.2.19 +PKG_VERSION:=7.2.21 PKG_RELEASE:=1 PKG_MAINTAINER:=Michael Heimpold <mhei@heimpold.de> @@ -17,7 +17,7 @@ PKG_CPE_ID:=cpe:/a:php:php PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=http://www.php.net/distributions/ -PKG_HASH:=4ffa2404a88d60e993a9fe69f829ebec3eb1e006de41b6048ce5e91bbeaa9282 +PKG_HASH:=de06aff019d8f5079115795bd7d8eedd4cd03daecb62d58abb18f492dd995c95 PKG_FIXUP:=libtool autoreconf PKG_BUILD_PARALLEL:=1 diff --git a/lang/python/django-appconf/Makefile b/lang/python/django-appconf/Makefile index e37f15848..8e81051e1 100644 --- a/lang/python/django-appconf/Makefile +++ b/lang/python/django-appconf/Makefile @@ -39,8 +39,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -51,8 +52,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-compressor/Makefile b/lang/python/django-compressor/Makefile index a4c9c4e09..6638bae69 100644 --- a/lang/python/django-compressor/Makefile +++ b/lang/python/django-compressor/Makefile @@ -39,10 +39,11 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django \ + python-django \ +PACKAGE_python-$(PKG_NAME):python-django-appconf \ +PACKAGE_python-$(PKG_NAME):python-rcssmin VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -56,10 +57,11 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - 
+PACKAGE_python3-$(PKG_NAME):python3-django \ + python3-django \ +PACKAGE_python3-$(PKG_NAME):python3-django-appconf \ +PACKAGE_python3-$(PKG_NAME):python3-rcssmin VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-constance/Makefile b/lang/python/django-constance/Makefile index 7a225acb7..749c0aae1 100644 --- a/lang/python/django-constance/Makefile +++ b/lang/python/django-constance/Makefile @@ -37,8 +37,9 @@ define Package/python-django-constance $(call Package/python-django-constance/Default) DEPENDS:= \ +PACKAGE_python-django-constance:python \ - +PACKAGE_python-django-constance:python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-django-constance/description @@ -49,8 +50,9 @@ define Package/python3-django-constance $(call Package/python-django-constance/Default) DEPENDS:= \ +PACKAGE_python3-django-constance:python3 \ - +PACKAGE_python3-django-constance:python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-django-constance/description diff --git a/lang/python/django-formtools/Makefile b/lang/python/django-formtools/Makefile index 4057bc687..445cd3446 100644 --- a/lang/python/django-formtools/Makefile +++ b/lang/python/django-formtools/Makefile @@ -37,8 +37,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -50,8 +51,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-jsonfield/Makefile b/lang/python/django-jsonfield/Makefile index ed3ffbceb..9e95d3a98 100644 --- a/lang/python/django-jsonfield/Makefile +++ b/lang/python/django-jsonfield/Makefile @@ -39,8 +39,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -51,8 +52,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-picklefield/Makefile b/lang/python/django-picklefield/Makefile index b24157b27..9a7331fc4 100644 --- a/lang/python/django-picklefield/Makefile +++ b/lang/python/django-picklefield/Makefile @@ -39,8 +39,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -51,8 +52,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff 
--git a/lang/python/django-postoffice/Makefile b/lang/python/django-postoffice/Makefile index 0a0a6f167..cd8ea3dbe 100644 --- a/lang/python/django-postoffice/Makefile +++ b/lang/python/django-postoffice/Makefile @@ -39,9 +39,10 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django \ + python-django \ +PACKAGE_python-$(PKG_NAME):python-django-jsonfield VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -53,9 +54,10 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django \ + python3-django \ +PACKAGE_python3-$(PKG_NAME):python3-django-jsonfield VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-ranged-response/Makefile b/lang/python/django-ranged-response/Makefile index 818530f59..923f5c75b 100644 --- a/lang/python/django-ranged-response/Makefile +++ b/lang/python/django-ranged-response/Makefile @@ -37,8 +37,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -49,8 +50,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-restframework/Makefile b/lang/python/django-restframework/Makefile index b699c4857..45c8d7e1f 100644 --- a/lang/python/django-restframework/Makefile +++ b/lang/python/django-restframework/Makefile @@ -39,8 +39,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -51,8 +52,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-simple-captcha/Makefile b/lang/python/django-simple-captcha/Makefile index 81d94d9ee..21f81594d 100644 --- a/lang/python/django-simple-captcha/Makefile +++ b/lang/python/django-simple-captcha/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=django-simple-captcha -PKG_VERSION:=0.5.11 -PKG_RELEASE:=4 +PKG_VERSION:=0.5.12 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/mbi/django-simple-captcha/tar.gz/v$(PKG_VERSION)? 
-PKG_HASH:=7ca26a4f48e14e5f8be022c0dc099ef98980f3fc99f403ca565ab1f3addaee5b +PKG_HASH:=89db73a3883573ad5e22c511948a5500491f9848363174d835a2364750c81a77 PKG_MAINTAINER:=Eneas U de Queiroz <cotequeiroz@gmail.com> PKG_LICENSE:=MIT @@ -40,10 +40,11 @@ $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ +PACKAGE_python-$(PKG_NAME):python-six \ - +PACKAGE_python-$(PKG_NAME):python-django \ + python-django \ +PACKAGE_python-$(PKG_NAME):python-pillow \ +PACKAGE_python-$(PKG_NAME):python-django-ranged-response VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -56,10 +57,11 @@ $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ +PACKAGE_python3-$(PKG_NAME):python3-six \ - +PACKAGE_python3-$(PKG_NAME):python3-django \ + python3-django \ +PACKAGE_python3-$(PKG_NAME):python3-pillow \ +PACKAGE_python3-$(PKG_NAME):python3-django-ranged-response VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-statici18n/Makefile b/lang/python/django-statici18n/Makefile index 97a880596..37f073534 100644 --- a/lang/python/django-statici18n/Makefile +++ b/lang/python/django-statici18n/Makefile @@ -39,8 +39,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -51,8 +52,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django-webpack-loader/Makefile b/lang/python/django-webpack-loader/Makefile index 5186ed946..72ed86799 100644 --- a/lang/python/django-webpack-loader/Makefile +++ b/lang/python/django-webpack-loader/Makefile @@ -37,8 +37,9 @@ define Package/python-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python-$(PKG_NAME):python \ - +PACKAGE_python-$(PKG_NAME):python-django + python-django VARIANT:=python + MDEPENDS:=python-django endef define Package/python-$(PKG_NAME)/description @@ -49,8 +50,9 @@ define Package/python3-$(PKG_NAME) $(call Package/$(PKG_NAME)/Default) DEPENDS:= \ +PACKAGE_python3-$(PKG_NAME):python3 \ - +PACKAGE_python3-$(PKG_NAME):python3-django + python3-django VARIANT:=python3 + MDEPENDS:=python3-django endef define Package/python3-$(PKG_NAME)/description diff --git a/lang/python/django/Makefile b/lang/python/django/Makefile index f280d3fca..3c51c7c21 100644 --- a/lang/python/django/Makefile +++ b/lang/python/django/Makefile @@ -35,6 +35,7 @@ define Package/django/Default CATEGORY:=Languages TITLE:=The web framework for perfectionists with deadlines. 
URL:=https://www.djangoproject.com/ + MENU:=1 endef define Package/python-django diff --git a/lang/python/python-asn1crypto/Makefile b/lang/python/python-asn1crypto/Makefile index 21d443e93..8f0993fc7 100644 --- a/lang/python/python-asn1crypto/Makefile +++ b/lang/python/python-asn1crypto/Makefile @@ -61,5 +61,8 @@ endef $(eval $(call PyPackage,python-asn1crypto)) $(eval $(call BuildPackage,python-asn1crypto)) +$(eval $(call BuildPackage,python-asn1crypto-src)) + $(eval $(call Py3Package,python3-asn1crypto)) $(eval $(call BuildPackage,python3-asn1crypto)) +$(eval $(call BuildPackage,python3-asn1crypto-src)) diff --git a/lang/python/python-dateutil/Makefile b/lang/python/python-dateutil/Makefile index 7a211e5b0..c90291c87 100644 --- a/lang/python/python-dateutil/Makefile +++ b/lang/python/python-dateutil/Makefile @@ -8,13 +8,13 @@ include $(TOPDIR)/rules.mk PKG_NAME:=python-dateutil -PKG_VERSION:=2.7.5 -PKG_RELEASE:=3 +PKG_VERSION:=2.8.0 +PKG_RELEASE:=1 PKG_LICENSE:=BSD-2-Clause PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/python-dateutil -PKG_HASH:=88f9287c0174266bb0d8cedd395cfba9c58e87e5ad86b2ce58859bc11be3cf02 +PKG_HASH:=c89805f6f4d64db21ed966fda138f8a5ed7a4fdbc1a8ee329ce1b74e3c74da9e PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-dateutil-$(PKG_VERSION) PKG_MAINTAINER:=Alexandru Ardelean <ardeleanalex@gmail.com> diff --git a/lang/python/python-evdev/Makefile b/lang/python/python-evdev/Makefile index 6f062d1c5..d72dd22a7 100644 --- a/lang/python/python-evdev/Makefile +++ b/lang/python/python-evdev/Makefile @@ -74,5 +74,8 @@ PYTHON_PKG_SETUP_GLOBAL_ARGS:=$(PYTHON3_PKG_SETUP_GLOBAL_ARGS) $(eval $(call PyPackage,python-evdev)) $(eval $(call BuildPackage,python-evdev)) +$(eval $(call BuildPackage,python-evdev-src)) + $(eval $(call Py3Package,python3-evdev)) $(eval $(call BuildPackage,python3-evdev)) +$(eval $(call BuildPackage,python3-evdev-src)) diff --git a/lang/python/python-lxml/Makefile b/lang/python/python-lxml/Makefile index 1d8b54c7e..8b61335cd 100644 --- a/lang/python/python-lxml/Makefile +++ b/lang/python/python-lxml/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=python-lxml -PKG_VERSION:=4.3.1 +PKG_VERSION:=4.4.1 PKG_RELEASE:=1 PKG_SOURCE:=lxml-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/l/lxml -PKG_HASH:=da5e7e941d6e71c9c9a717c93725cda0708c2474f532e3680ac5e39ec57d224d +PKG_HASH:=c81cb40bff373ab7a7446d6bbca0190bccc5be3448b47b51d729e37799bb5692 PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-lxml-$(PKG_VERSION) PKG_UNPACK=$(HOST_TAR) -C $(PKG_BUILD_DIR) --strip-components=1 -xzf $(DL_DIR)/$(PKG_SOURCE) @@ -89,6 +89,8 @@ endef $(eval $(call PyPackage,python-lxml)) $(eval $(call BuildPackage,python-lxml)) +$(eval $(call BuildPackage,python-lxml-src)) $(eval $(call Py3Package,python3-lxml)) $(eval $(call BuildPackage,python3-lxml)) +$(eval $(call BuildPackage,python3-lxml-src)) diff --git a/lang/python/python-package.mk b/lang/python/python-package.mk index 272aae44f..9bd0b9aa4 100644 --- a/lang/python/python-package.mk +++ b/lang/python/python-package.mk @@ -45,6 +45,7 @@ define PyPackage EXTRA_DEPENDS:= TITLE+= (sources) USERID:= + MENU:= endef define Package/$(1)-src/description diff --git a/lang/python/python-pyasn1-modules/Makefile b/lang/python/python-pyasn1-modules/Makefile index 8456a55aa..10b1160ef 100644 --- a/lang/python/python-pyasn1-modules/Makefile +++ b/lang/python/python-pyasn1-modules/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk 
PKG_NAME:=python-pyasn1-modules -PKG_VERSION:=0.2.5 +PKG_VERSION:=0.2.6 PKG_RELEASE:=1 PKG_SOURCE:=pyasn1-modules-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/pyasn1-modules -PKG_HASH:=ef721f68f7951fab9b0404d42590f479e30d9005daccb1699b0a51bb4177db96 +PKG_HASH:=43c17a83c155229839cc5c6b868e8d0c6041dba149789b6d6e28801c64821722 PKG_LICENSE:=BSD-2-Clause PKG_LICENSE_FILES:=LICENSE.txt diff --git a/lang/python/python-pyasn1/Makefile b/lang/python/python-pyasn1/Makefile index 4a7c5451c..9c4235d2d 100644 --- a/lang/python/python-pyasn1/Makefile +++ b/lang/python/python-pyasn1/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=python-pyasn1 -PKG_VERSION:=0.4.5 +PKG_VERSION:=0.4.6 PKG_RELEASE:=1 PKG_SOURCE:=pyasn1-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/pyasn1 -PKG_HASH:=da2420fe13a9452d8ae97a0e478adde1dee153b11ba832a95b223a2ba01c10f7 +PKG_HASH:=b773d5c9196ffbc3a1e13bdf909d446cad80a039aa3340bcad72f395b76ebc86 PKG_LICENSE:=BSD-2-Clause PKG_LICENSE_FILES:=LICENSE.txt diff --git a/lang/python/python-pytz/Makefile b/lang/python/python-pytz/Makefile index b3acbd104..841855f65 100644 --- a/lang/python/python-pytz/Makefile +++ b/lang/python/python-pytz/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=python-pytz -PKG_VERSION:=2019.1 -PKG_RELEASE:=2 +PKG_VERSION:=2019.2 +PKG_RELEASE:=1 PKG_SOURCE:=pytz-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/p/pytz -PKG_HASH:=d747dd3d23d77ef44c6a3526e274af6efeb0a6f1afd5a69ba4d5be4098c8e141 +PKG_HASH:=26c0b32e437e54a18161324a2fca3c4b9846b74a8dccddd843113109e1116b32 PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-pytz-$(PKG_VERSION) diff --git a/lang/python/python-twisted/Makefile b/lang/python/python-twisted/Makefile index 136d74d4b..6bb544683 100644 --- a/lang/python/python-twisted/Makefile +++ b/lang/python/python-twisted/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=python-twisted -PKG_VERSION:=19.2.1 +PKG_VERSION:=19.7.0 PKG_RELEASE:=1 PKG_SOURCE:=Twisted-$(PKG_VERSION).tar.bz2 PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/T/Twisted -PKG_HASH:=fa2c04c2d68a9be7fc3975ba4947f653a57a656776f24be58ff0fe4b9aaf3e52 +PKG_HASH:=d5db93026568f60cacdc0615fcd21d46f694a6bfad0ef3ff53cde2b4bb85a39d PKG_BUILD_DIR:=$(BUILD_DIR)/$(BUILD_VARIANT)-twisted-$(PKG_VERSION) PKG_BUILD_DEPENDS:=libtirpc diff --git a/lang/python/python-twisted/patches/001-omit-tkconch.patch b/lang/python/python-twisted/patches/001-omit-tkconch.patch index a5bddaaec..67a37bc1c 100644 --- a/lang/python/python-twisted/patches/001-omit-tkconch.patch +++ b/lang/python/python-twisted/patches/001-omit-tkconch.patch @@ -1,6 +1,6 @@ --- a/src/twisted/python/_setup.py +++ b/src/twisted/python/_setup.py -@@ -147,7 +147,6 @@ _CONSOLE_SCRIPTS = [ +@@ -149,7 +149,6 @@ _CONSOLE_SCRIPTS = [ "conch = twisted.conch.scripts.conch:run", "mailmail = twisted.mail.scripts.mailmail:run", "pyhtmlizer = twisted.scripts.htmlizer:run", diff --git a/lang/python/python-twisted/patches/002-omit-tests.patch b/lang/python/python-twisted/patches/002-omit-tests.patch index 83ca2cd76..51adbbdf8 100644 --- a/lang/python/python-twisted/patches/002-omit-tests.patch +++ b/lang/python/python-twisted/patches/002-omit-tests.patch @@ -1,6 +1,6 @@ --- a/src/twisted/python/_setup.py +++ b/src/twisted/python/_setup.py -@@ -173,11 +173,6 @@ class ConditionalExtension(Extension, ob +@@ -175,11 +175,6 @@ class ConditionalExtension(Extension, ob # The C extensions used for 
Twisted. _EXTENSIONS = [ ConditionalExtension( @@ -12,7 +12,7 @@ "twisted.internet.iocpreactor.iocpsupport", sources=[ "src/twisted/internet/iocpreactor/iocpsupport/iocpsupport.c", -@@ -241,12 +236,11 @@ def getSetupArgs(extensions=_EXTENSIONS) +@@ -284,12 +279,11 @@ def getSetupArgs(extensions=_EXTENSIONS, "incremental >= 16.10.1", "Automat >= 0.3.0", "hyperlink >= 17.1.1", @@ -26,7 +26,7 @@ use_incremental=True, setup_requires=["incremental >= 16.10.1"], install_requires=requirements, -@@ -256,7 +250,7 @@ def getSetupArgs(extensions=_EXTENSIONS) +@@ -299,7 +293,7 @@ def getSetupArgs(extensions=_EXTENSIONS, cmdclass=command_classes, include_package_data=True, exclude_package_data={ diff --git a/lang/python/python/Makefile b/lang/python/python/Makefile index b83613cdc..b75e22966 100644 --- a/lang/python/python/Makefile +++ b/lang/python/python/Makefile @@ -12,7 +12,7 @@ include ../python-version.mk PKG_NAME:=python PKG_VERSION:=$(PYTHON_VERSION).$(PYTHON_VERSION_MICRO) -PKG_RELEASE:=8 +PKG_RELEASE:=9 PKG_SOURCE:=Python-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=https://www.python.org/ftp/python/$(PKG_VERSION) @@ -290,13 +290,18 @@ define PyPackage/python/filespec endef HOST_LDFLAGS += \ - $$$$(pkg-config --static --libs libcrypto libssl) -Wl$(comma)-rpath=$(STAGING_DIR_HOSTPKG)/lib + -Wl$(comma)-rpath$(comma)$(STAGING_DIR_HOSTPKG)/lib ifeq ($(HOST_OS),Linux) HOST_LDFLAGS += \ -Wl,--no-as-needed -lrt endif +ifeq ($(HOST_OS),Darwin) +HOST_CONFIGURE_VARS += \ + ac_cv_header_libintl_h=no +endif + HOST_CONFIGURE_ARGS+= \ --without-cxx-main \ --without-pymalloc \ diff --git a/lang/python/python/patches/025-utime.patch b/lang/python/python/patches/025-utime.patch new file mode 100644 index 000000000..20d0310d9 --- /dev/null +++ b/lang/python/python/patches/025-utime.patch @@ -0,0 +1,11 @@ +--- a/Modules/posixmodule.c ++++ b/Modules/posixmodule.c +@@ -3070,7 +3070,7 @@ done: + if (arg == Py_None) { + /* optional time values not given */ + Py_BEGIN_ALLOW_THREADS +- res = utime(path, NULL); ++ res = utimes(path, NULL); + Py_END_ALLOW_THREADS + } + else if (!PyTuple_Check(arg) || PyTuple_Size(arg) != 2) { diff --git a/lang/python/python3-package.mk b/lang/python/python3-package.mk index 6e163e099..e14290081 100644 --- a/lang/python/python3-package.mk +++ b/lang/python/python3-package.mk @@ -44,6 +44,7 @@ define Py3Package EXTRA_DEPENDS:= TITLE+= (sources) USERID:= + MENU:= endef define Package/$(1)-src/description diff --git a/lang/python/python3/Makefile b/lang/python/python3/Makefile index 4cd1dc997..79a71ccaf 100644 --- a/lang/python/python3/Makefile +++ b/lang/python/python3/Makefile @@ -285,13 +285,18 @@ define Py3Package/python3/filespec endef HOST_LDFLAGS += \ - $$$$(pkg-config --static --libs libcrypto libssl) -Wl$(comma)-rpath=$(STAGING_DIR_HOSTPKG)/lib + -Wl$(comma)-rpath$(comma)$(STAGING_DIR_HOSTPKG)/lib ifeq ($(HOST_OS),Linux) HOST_LDFLAGS += \ -Wl,--no-as-needed -lrt endif +ifeq ($(HOST_OS),Darwin) +HOST_CONFIGURE_VARS += \ + ac_cv_header_libintl_h=no +endif + HOST_CONFIGURE_ARGS+= \ --without-cxx-main \ --without-pymalloc \ diff --git a/libs/elektra/Makefile b/libs/elektra/Makefile index 367ee714c..b78253e3f 100644 --- a/libs/elektra/Makefile +++ b/libs/elektra/Makefile @@ -15,7 +15,7 @@ PKG_NAME:=elektra PKG_LICENSE:=BSD-3-Clause PKG_LICENSE_FILES:=LICENSE.md PKG_VERSION:=0.8.21 -PKG_RELEASE:=3 +PKG_RELEASE:=4 # Use this for official releasees PKG_HASH:=51892570f18d1667d0da4d0908a091e41b41c20db9835765677109a3d150cd26 @@ -29,7 +29,8 @@ 
PKG_SOURCE_URL:=http://ftp.libelektra.org/ftp/elektra/releases #PKG_SOURCE_VERSION:=e97efb29a94f3a49cb952d06552fcf53708ea8c7 #PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.xz -PKG_BUILD_DEPENDS:=elektra/host swig/host +HOST_BUILD_DEPENDS:=swig/host +PKG_BUILD_DEPENDS:=elektra/host lua include $(INCLUDE_DIR)/package.mk include $(INCLUDE_DIR)/host-build.mk @@ -261,7 +262,7 @@ endef define Package/libelektra-lua $(call Package/libelektra/Default) TITLE:=Elektra lua plugin - DEPENDS:=+libelektra-core +lua +libstdcpp + DEPENDS:=+libelektra-core +lua5.3 +libstdcpp endef define Package/libelektra-lua/description diff --git a/libs/flac/Makefile b/libs/flac/Makefile index 47de71e32..954121076 100644 --- a/libs/flac/Makefile +++ b/libs/flac/Makefile @@ -8,21 +8,21 @@ include $(TOPDIR)/rules.mk PKG_NAME:=flac -PKG_VERSION:=1.3.2 -PKG_RELEASE:=2 +PKG_VERSION:=1.3.3 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=https://downloads.xiph.org/releases/flac/ -PKG_HASH:=91cfc3ed61dc40f47f050a109b08610667d73477af6ef36dcad31c31a4a8d53f -PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net> - -PKG_INSTALL:=1 +PKG_HASH:=213e82bd716c9de6db2f98bcadbc4c24c7e2efe8c75939a1a84e28539c4e1748 +PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net> PKG_LICENSE:=GFDL-1.2 GPL-2 LGPL-2.1 BSD-3-Clause PKG_LICENSE_FILES:=README COPYING.FDL COPYING.GPL COPYING.LGPL COPYING.Xiph PKG_CPE_ID:=cpe:/a:flac_project:flac PKG_FIXUP:=autoreconf +PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 PKG_REMOVE_FILES:=autogen.sh aclocal.m4 include $(INCLUDE_DIR)/package.mk @@ -37,15 +37,18 @@ endef CONFIGURE_ARGS += \ --disable-cpplibs \ --disable-sse \ - --disable-3dnow \ --disable-altivec \ --disable-doxgen-docs \ --disable-local-xmms-plugin \ --disable-xmms-plugin \ --disable-ogg \ --disable-oggtest \ + --disable-thorough-tests \ + --disable-examples \ + --disable-rpath \ $(if $(CONFIG_DEBUG),--enable-debug) \ - --enable-static + --enable-static \ + --without-pic TARGET_CFLAGS += $(FPIC) diff --git a/libs/flac/patches/001-no-docs-and-examples.patch b/libs/flac/patches/001-no-docs-and-examples.patch index 7c34288c3..c844771d8 100644 --- a/libs/flac/patches/001-no-docs-and-examples.patch +++ b/libs/flac/patches/001-no-docs-and-examples.patch @@ -4,11 +4,11 @@ ACLOCAL_AMFLAGS = -I m4 --SUBDIRS = doc include m4 man src examples test build objs microbench +-SUBDIRS = doc include m4 man src test build objs microbench +SUBDIRS = include m4 src build objs - EXTRA_DIST = \ - COPYING.FDL \ + if EXAMPLES + SUBDIRS += examples --- a/src/Makefile.am +++ b/src/Makefile.am @@ -30,11 +30,6 @@ SUBDIRS = \ diff --git a/libs/flac/patches/005-gcc_debug_options.patch b/libs/flac/patches/005-gcc_debug_options.patch deleted file mode 100644 index ea7db8e14..000000000 --- a/libs/flac/patches/005-gcc_debug_options.patch +++ /dev/null @@ -1,16 +0,0 @@ ---- a/configure.ac -+++ b/configure.ac -@@ -386,10 +386,11 @@ fi - - if test "x$debug" = xtrue; then - CPPFLAGS="-DDEBUG $CPPFLAGS" -- CFLAGS="-g $CFLAGS" -+ CFLAGS=$(echo "$CFLAGS" | sed 's/-g[0-9]*//') -+ CFLAGS="-g3 $CFLAGS" - else - CPPFLAGS="-DNDEBUG $CPPFLAGS" -- CFLAGS=$(echo "$CFLAGS" | sed 's/-O2//') -+ CFLAGS=$(echo "$CFLAGS" | sed 's/-O2//;s/-g[0-9]*//') - CFLAGS="-O3 -funroll-loops $CFLAGS" - fi - diff --git a/libs/flac/patches/010-automake-compat.patch b/libs/flac/patches/010-automake-compat.patch deleted file mode 100644 index 83180480b..000000000 --- a/libs/flac/patches/010-automake-compat.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/Makefile.am -+++ b/Makefile.am -@@ -33,6 
+33,8 @@ ACLOCAL_AMFLAGS = -I m4 - - SUBDIRS = include m4 src build objs - -+ACLOCAL_AMFLAGS = -I m4 -+ - EXTRA_DIST = \ - COPYING.FDL \ - COPYING.GPL \ diff --git a/libs/flac/patches/010-utime.patch b/libs/flac/patches/010-utime.patch new file mode 100644 index 000000000..0c1d17024 --- /dev/null +++ b/libs/flac/patches/010-utime.patch @@ -0,0 +1,118 @@ +Return-Path: <rosenp@gmail.com> +Received: from localhost.localdomain (76-14-106-55.rk.wavecable.com. [76.14.106.55]) + by smtp.gmail.com with ESMTPSA id f19sm148509170pfk.180.2019.08.09.13.01.06 + for <flac-dev@xiph.org> + (version=TLS1_3 cipher=AEAD-AES256-GCM-SHA384 bits=256/256); + Fri, 09 Aug 2019 13:01:06 -0700 (PDT) +From: Rosen Penev <rosenp@gmail.com> +To: flac-dev@xiph.org +Subject: [PATCH] Switch to utimensat for newer POSIX versions +Date: Fri, 9 Aug 2019 13:01:05 -0700 +Message-Id: <20190809200105.1443-1-rosenp@gmail.com> +X-Mailer: git-send-email 2.17.1 + +Some libcs like uClibc-ng can optionally disable deprecated functions. +utime is one of them. When done so, both the header and the function go +missing. + +This fixes flac_utime to work in such a situation. +--- + include/share/compat.h | 10 +++++++++- + src/libFLAC/metadata_iterators.c | 9 +++++++-- + src/share/grabbag/file.c | 9 +++++++-- + 3 files changed, 23 insertions(+), 5 deletions(-) + +diff --git a/include/share/compat.h b/include/share/compat.h +index f3041655..a063c083 100644 +--- a/include/share/compat.h ++++ b/include/share/compat.h +@@ -112,9 +112,13 @@ + #include <sys/utime.h> /* for utime() */ + #endif + #else ++#if _POSIX_C_SOURCE >= 200809L ++#include <fcntl.h> ++#else + #include <sys/types.h> /* some flavors of BSD (like OS X) require this to get time_t */ + #include <utime.h> /* for utime() */ + #endif ++#endif + + #if defined _MSC_VER + # if _MSC_VER >= 1800 +@@ -160,11 +164,15 @@ + + #define flac_fopen fopen + #define flac_chmod chmod +-#define flac_utime utime + #define flac_unlink unlink + #define flac_rename rename + #define flac_stat stat + ++#if _POSIX_C_SOURCE >= 200809L ++#define flac_utime(a, b) utimensat (AT_FDCWD, a, *b, 0) ++#else ++#define flac_utime utime ++#endif + #endif + + #ifdef _WIN32 +diff --git a/src/libFLAC/metadata_iterators.c b/src/libFLAC/metadata_iterators.c +index 352a6c7a..d5255eb9 100644 +--- a/src/libFLAC/metadata_iterators.c ++++ b/src/libFLAC/metadata_iterators.c +@@ -3422,13 +3422,18 @@ FLAC__bool get_file_stats_(const char *filename, struct flac_stat_s *stats) + + void set_file_stats_(const char *filename, struct flac_stat_s *stats) + { +- struct utimbuf srctime; +- + FLAC__ASSERT(0 != filename); + FLAC__ASSERT(0 != stats); + ++#if _POSIX_C_SOURCE >= 200809L ++ struct timespec srctime[2] = {}; ++ srctime[0].tv_sec = stats->st_atime; ++ srctime[1].tv_sec = stats->st_mtime; ++#else ++ struct utimbuf srctime; + srctime.actime = stats->st_atime; + srctime.modtime = stats->st_mtime; ++#endif + (void)flac_chmod(filename, stats->st_mode); + (void)flac_utime(filename, &srctime); + #if !defined _MSC_VER && !defined __BORLANDC__ && !defined __MINGW32__ +diff --git a/src/share/grabbag/file.c b/src/share/grabbag/file.c +index 2c67bebf..edd835a6 100644 +--- a/src/share/grabbag/file.c ++++ b/src/share/grabbag/file.c +@@ -27,7 +27,6 @@ + #include <fcntl.h> /* for _O_BINARY */ + #else + #include <sys/types.h> /* some flavors of BSD (like OS X) require this to get time_t */ +-#include <utime.h> /* for utime() */ + #endif + #if defined __EMX__ + #include <io.h> /* for setmode(), O_BINARY */ +@@ -53,11 +52,17 @@ + void 
grabbag__file_copy_metadata(const char *srcpath, const char *destpath) + { + struct flac_stat_s srcstat; +- struct utimbuf srctime; + + if(0 == flac_stat(srcpath, &srcstat)) { ++#if _POSIX_C_SOURCE >= 200809L ++ struct timespec srctime[2] = {}; ++ srctime[0].tv_sec = srcstat.st_atime; ++ srctime[1].tv_sec = srcstat.st_mtime; ++#else ++ struct utimbuf srctime; + srctime.actime = srcstat.st_atime; + srctime.modtime = srcstat.st_mtime; ++#endif + (void)flac_chmod(destpath, srcstat.st_mode); + (void)flac_utime(destpath, &srctime); + } +-- +2.17.1 + diff --git a/libs/flac/patches/100-CVE-2017-6888.patch b/libs/flac/patches/100-CVE-2017-6888.patch deleted file mode 100644 index 3de0cc5d3..000000000 --- a/libs/flac/patches/100-CVE-2017-6888.patch +++ /dev/null @@ -1,27 +0,0 @@ -From 4f47b63e9c971e6391590caf00a0f2a5ed612e67 Mon Sep 17 00:00:00 2001 -From: Erik de Castro Lopo <erikd@mega-nerd.com> -Date: Sat, 8 Apr 2017 18:34:49 +1000 -Subject: [PATCH] stream_decoder.c: Fix a memory leak - -Leak reported by Secunia Research. ---- - src/libFLAC/stream_decoder.c | 3 +++ - 1 file changed, 3 insertions(+) - -diff --git a/src/libFLAC/stream_decoder.c b/src/libFLAC/stream_decoder.c -index 14d5fe7f..a5527511 100644 ---- a/src/libFLAC/stream_decoder.c -+++ b/src/libFLAC/stream_decoder.c -@@ -1759,6 +1759,9 @@ FLAC__bool read_metadata_vorbiscomment_(FLAC__StreamDecoder *decoder, FLAC__Stre - } - memset (obj->comments[i].entry, 0, obj->comments[i].length) ; - if (!FLAC__bitreader_read_byte_block_aligned_no_crc(decoder->private_->input, obj->comments[i].entry, obj->comments[i].length)) { -+ /* Current i-th entry is bad, so we delete it. */ -+ free (obj->comments[i].entry) ; -+ obj->comments[i].entry = NULL ; - obj->num_comments = i; - goto skip; - } --- -2.17.0 - diff --git a/libs/gnutls/Makefile b/libs/gnutls/Makefile index 36837c111..61312207e 100644 --- a/libs/gnutls/Makefile +++ b/libs/gnutls/Makefile @@ -8,13 +8,13 @@ include $(TOPDIR)/rules.mk PKG_NAME:=gnutls -PKG_VERSION:=3.6.8 -PKG_RELEASE:=2 +PKG_VERSION:=3.6.9 +PKG_RELEASE:=1 PKG_USE_MIPS16:=0 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=https://www.gnupg.org/ftp/gcrypt/gnutls/v3.6 -PKG_HASH:=aa81944e5635de981171772857e72be231a7e0f559ae0292d2737de475383e83 +PKG_HASH:=4331fca55817ecdd74450b908a6c29b4f05bb24dd13144c6284aa34d872e1fcb #PKG_FIXUP:=autoreconf gettext-version PKG_MAINTAINER:=Nikos Mavrogiannopoulos <nmav@gnutls.org> PKG_LICENSE:=LGPLv2.1+ @@ -104,7 +104,10 @@ $(call Package/gnutls/Default/description) This package contains the GnuTLS shared library, needed by other programs. endef - +# We disable the configuration file (system-priority-file) because +# the use of configuration increases the non-shared memory used by +# the library and we don't provide an openwrt-specific configuration +# anyway.
CONFIGURE_ARGS+= \ --enable-shared \ --enable-static \ @@ -124,7 +127,8 @@ CONFIGURE_ARGS+= \ --with-default-trust-store-dir=/etc/ssl/certs/ \ --with-included-unistring \ --with-librt-prefix="$(LIBRT_ROOT_DIR)/" \ - --with-pic + --with-pic \ + --with-system-priority-file="" ifneq ($(CONFIG_GNUTLS_EXT_LIBTASN1),y) CONFIGURE_ARGS += --with-included-libtasn1 diff --git a/libs/intltool/Makefile b/libs/intltool/Makefile index 93952271a..244f59995 100644 --- a/libs/intltool/Makefile +++ b/libs/intltool/Makefile @@ -23,7 +23,7 @@ include $(INCLUDE_DIR)/host-build.mk include $(INCLUDE_DIR)/package.mk HOST_CONFIGURE_VARS+= \ - PATH=$(STAGING_DIR_HOSTPKG)/bin:$(STAGING_DIR_HOSTPKG)/usr/bin:$(PATH) + PATH="$(STAGING_DIR_HOSTPKG)/bin:$(STAGING_DIR_HOSTPKG)/usr/bin:$(PATH)" define Package/intltool SECTION:=libs diff --git a/libs/libarchive/Makefile b/libs/libarchive/Makefile index 6d579644b..b2e6fcf10 100644 --- a/libs/libarchive/Makefile +++ b/libs/libarchive/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libarchive -PKG_VERSION:=3.3.3 -PKG_RELEASE:=3 +PKG_VERSION:=3.4.0 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_SOURCE_URL:=https://www.libarchive.org/downloads -PKG_HASH:=ba7eb1781c9fbbae178c4c6bad1c6eb08edab9a1496c64833d1715d022b30e2e +PKG_SOURCE_URL:=https://codeload.github.com/libarchive/libarchive/tar.gz/v$(PKG_VERSION)? +PKG_HASH:=c160d3c45010a51a924208f13f6b7b956dabdf8c5c60195df188a599028caa7c PKG_MAINTAINER:=Johannes Morgenroth <morgenroth@ibr.cs.tu-bs.de> PKG_LICENSE:=BSD-2-Clause @@ -27,38 +27,49 @@ PKG_FIXUP:=autoreconf include $(INCLUDE_DIR)/package.mk define Package/libarchive/Default - SECTION:=libs - CATEGORY:=Libraries - DEPENDS:=+zlib +liblzma +libbz2 +libexpat - TITLE:=Multi-format archive and compression library - URL:=https://www.libarchive.org/ + SECTION:=libs + CATEGORY:=Libraries + DEPENDS:=+zlib +liblzma +libbz2 +libexpat + TITLE:=Multi-format archive and compression library + URL:=https://www.libarchive.org/ endef define Package/libarchive $(call Package/libarchive/Default) - DEPENDS += +libopenssl + DEPENDS += +libopenssl endef define Package/libarchive-noopenssl $(call Package/libarchive/Default) - TITLE += (without OpenSSL dependency) - VARIANT:=noopenssl + TITLE += (without OpenSSL dependency) + VARIANT:=noopenssl +endef + +define Package/bsdtar/Default + SECTION:=utils + CATEGORY:=Utilities + SUBMENU:=Compression + TITLE:=tar BSD variant + URL:=https://www.libarchive.org/ endef define Package/bsdtar - SECTION:=utils - CATEGORY:=Utilities - SUBMENU:=Compression - DEPENDS:=+libarchive-noopenssl - TITLE:=BSD variant that supports various file compression formats - URL:=http://www.libarchive.org/ + $(call Package/bsdtar/Default) + DEPENDS:= +libarchive +endef + +define Package/bsdtar-noopenssl + $(call Package/bsdtar/Default) + TITLE += (without OpenSSL dependency) + DEPENDS:= +libarchive-noopenssl + VARIANT:=noopenssl endef define Package/bsdtar/description - Reads a variety of formats including tar, pax, zip, xar, lha, ar, - cab, mtree, rar, warc, 7z and ISO images. Writes tar, pax, zip, - xar, ar, ISO, mtree and shar archives. Automatically handles - archives compressed with gzip, bzip2, lzip, xz, lzma or compress. + Reads a variety of formats including tar, pax, zip, xar, lha, ar, + cab, mtree, rar, warc, 7z and ISO images. Writes tar, pax, zip, + xar, ar, ISO, mtree and shar archives. Automatically handles + archives compressed with gzip, bzip2, lzip, xz, lzma or compress. 
endef CONFIGURE_ARGS += \ @@ -99,7 +110,9 @@ define Package/bsdtar/install endef Package/libarchive-noopenssl/install = $(Package/libarchive/install) +Package/bsdtar-noopenssl/install = $(Package/bsdtar/install) $(eval $(call BuildPackage,libarchive)) $(eval $(call BuildPackage,libarchive-noopenssl)) $(eval $(call BuildPackage,bsdtar)) +$(eval $(call BuildPackage,bsdtar-noopenssl)) diff --git a/libs/libarchive/patches/100-CVE-2018-1000880.patch b/libs/libarchive/patches/100-CVE-2018-1000880.patch deleted file mode 100644 index 0d9566ff9..000000000 --- a/libs/libarchive/patches/100-CVE-2018-1000880.patch +++ /dev/null @@ -1,37 +0,0 @@ -From 9c84b7426660c09c18cc349f6d70b5f8168b5680 Mon Sep 17 00:00:00 2001 -From: Daniel Axtens <dja@axtens.net> -Date: Tue, 4 Dec 2018 16:33:42 +1100 -Subject: [PATCH] warc: consume data once read - -The warc decoder only used read ahead, it wouldn't actually consume -data that had previously been printed. This means that if you specify -an invalid content length, it will just reprint the same data over -and over and over again until it hits the desired length. - -This means that a WARC resource with e.g. -Content-Length: 666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666666665 -but only a few hundred bytes of data, causes a quasi-infinite loop. - -Consume data in subsequent calls to _warc_read. - -Found with an AFL + afl-rb + qsym setup. ---- - libarchive/archive_read_support_format_warc.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/libarchive/archive_read_support_format_warc.c b/libarchive/archive_read_support_format_warc.c -index e8753853f..e8fc8428b 100644 ---- a/libarchive/archive_read_support_format_warc.c -+++ b/libarchive/archive_read_support_format_warc.c -@@ -386,6 +386,11 @@ _warc_read(struct archive_read *a, const void **buf, size_t *bsz, int64_t *off) - return (ARCHIVE_EOF); - } - -+ if (w->unconsumed) { -+ __archive_read_consume(a, w->unconsumed); -+ w->unconsumed = 0U; -+ } -+ - rab = __archive_read_ahead(a, 1U, &nrd); - if (nrd < 0) { - *bsz = 0U; diff --git a/libs/libarchive/patches/101-CVE-2018-1000879.patch b/libs/libarchive/patches/101-CVE-2018-1000879.patch deleted file mode 100644 index ecd4da537..000000000 --- a/libs/libarchive/patches/101-CVE-2018-1000879.patch +++ /dev/null @@ -1,43 +0,0 @@ -From 15bf44fd2c1ad0e3fd87048b3fcc90c4dcff1175 Mon Sep 17 00:00:00 2001 -From: Daniel Axtens <dja@axtens.net> -Date: Tue, 4 Dec 2018 14:29:42 +1100 -Subject: [PATCH] Skip 0-length ACL fields - -Currently, it is possible to create an archive that crashes bsdtar -with a malformed ACL: - -Program received signal SIGSEGV, Segmentation fault. -archive_acl_from_text_l (acl=<optimised out>, text=0x7e2e92 "", want_type=<optimised out>, sc=<optimised out>) at libarchive/archive_acl.c:1726 -1726 switch (*s) { -(gdb) p n -$1 = 1 -(gdb) p field[n] -$2 = {start = 0x0, end = 0x0} - -Stop this by checking that the length is not zero before beginning -the switch statement. - -I am pretty sure this is the bug mentioned in the qsym paper [1], -and I was able to replicate it with a qsym + AFL + afl-rb setup. 
- -[1] https://www.usenix.org/conference/usenixsecurity18/presentation/yun ---- - libarchive/archive_acl.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/libarchive/archive_acl.c b/libarchive/archive_acl.c -index 512beee1f..7beeee86e 100644 ---- a/libarchive/archive_acl.c -+++ b/libarchive/archive_acl.c -@@ -1723,6 +1723,11 @@ archive_acl_from_text_l(struct archive_acl *acl, const char *text, - st = field[n].start + 1; - len = field[n].end - field[n].start; - -+ if (len == 0) { -+ ret = ARCHIVE_WARN; -+ continue; -+ } -+ - switch (*s) { - case 'u': - if (len == 1 || (len == 4 diff --git a/libs/libarchive/patches/102-CVE-2018-1000878.patch b/libs/libarchive/patches/102-CVE-2018-1000878.patch deleted file mode 100644 index df4896924..000000000 --- a/libs/libarchive/patches/102-CVE-2018-1000878.patch +++ /dev/null @@ -1,72 +0,0 @@ -From bfcfe6f04ed20db2504db8a254d1f40a1d84eb28 Mon Sep 17 00:00:00 2001 -From: Daniel Axtens <dja@axtens.net> -Date: Tue, 4 Dec 2018 00:55:22 +1100 -Subject: [PATCH] rar: file split across multi-part archives must match - -Fuzzing uncovered some UAF and memory overrun bugs where a file in a -single file archive reported that it was split across multiple -volumes. This was caused by ppmd7 operations calling -rar_br_fillup. This would invoke rar_read_ahead, which would in some -situations invoke archive_read_format_rar_read_header. That would -check the new file name against the old file name, and if they didn't -match up it would free the ppmd7 buffer and allocate a new -one. However, because the ppmd7 decoder wasn't actually done with the -buffer, it would continue to used the freed buffer. Both reads and -writes to the freed region can be observed. - -This is quite tricky to solve: once the buffer has been freed it is -too late, as the ppmd7 decoder functions almost universally assume -success - there's no way for ppmd_read to signal error, nor are there -good ways for functions like Range_Normalise to propagate them. So we -can't detect after the fact that we're in an invalid state - e.g. by -checking rar->cursor, we have to prevent ourselves from ever ending up -there. So, when we are in the dangerous part or rar_read_ahead that -assumes a valid split, we set a flag force read_header to either go -down the path for split files or bail. This means that the ppmd7 -decoder keeps a valid buffer and just runs out of data. - -Found with a combination of AFL, afl-rb and qsym. 
---- - libarchive/archive_read_support_format_rar.c | 9 +++++++++ - 1 file changed, 9 insertions(+) - -diff --git a/libarchive/archive_read_support_format_rar.c b/libarchive/archive_read_support_format_rar.c -index 6f419c270..a8cc5c94d 100644 ---- a/libarchive/archive_read_support_format_rar.c -+++ b/libarchive/archive_read_support_format_rar.c -@@ -258,6 +258,7 @@ struct rar - struct data_block_offsets *dbo; - unsigned int cursor; - unsigned int nodes; -+ char filename_must_match; - - /* LZSS members */ - struct huffman_code maincode; -@@ -1560,6 +1561,12 @@ read_header(struct archive_read *a, struct archive_entry *entry, - } - return ret; - } -+ else if (rar->filename_must_match) -+ { -+ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, -+ "Mismatch of file parts split across multi-volume archive"); -+ return (ARCHIVE_FATAL); -+ } - - rar->filename_save = (char*)realloc(rar->filename_save, - filename_size + 1); -@@ -2933,12 +2940,14 @@ rar_read_ahead(struct archive_read *a, size_t min, ssize_t *avail) - else if (*avail == 0 && rar->main_flags & MHD_VOLUME && - rar->file_flags & FHD_SPLIT_AFTER) - { -+ rar->filename_must_match = 1; - ret = archive_read_format_rar_read_header(a, a->entry); - if (ret == (ARCHIVE_EOF)) - { - rar->has_endarc_header = 1; - ret = archive_read_format_rar_read_header(a, a->entry); - } -+ rar->filename_must_match = 0; - if (ret != (ARCHIVE_OK)) - return NULL; - return rar_read_ahead(a, min, avail); diff --git a/libs/libarchive/patches/103-CVE-2018-1000877.patch b/libs/libarchive/patches/103-CVE-2018-1000877.patch deleted file mode 100644 index 7998b5583..000000000 --- a/libs/libarchive/patches/103-CVE-2018-1000877.patch +++ /dev/null @@ -1,31 +0,0 @@ -From 021efa522ad729ff0f5806c4ce53e4a6cc1daa31 Mon Sep 17 00:00:00 2001 -From: Daniel Axtens <dja@axtens.net> -Date: Tue, 20 Nov 2018 17:56:29 +1100 -Subject: [PATCH] Avoid a double-free when a window size of 0 is specified - -new_size can be 0 with a malicious or corrupted RAR archive. - -realloc(area, 0) is equivalent to free(area), so the region would -be free()d here and the free()d again in the cleanup function. - -Found with a setup running AFL, afl-rb, and qsym. 
---- - libarchive/archive_read_support_format_rar.c | 5 +++++ - 1 file changed, 5 insertions(+) - -diff --git a/libarchive/archive_read_support_format_rar.c b/libarchive/archive_read_support_format_rar.c -index 234522229..6f419c270 100644 ---- a/libarchive/archive_read_support_format_rar.c -+++ b/libarchive/archive_read_support_format_rar.c -@@ -2300,6 +2300,11 @@ parse_codes(struct archive_read *a) - new_size = DICTIONARY_MAX_SIZE; - else - new_size = rar_fls((unsigned int)rar->unp_size) << 1; -+ if (new_size == 0) { -+ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, -+ "Zero window size is invalid."); -+ return (ARCHIVE_FATAL); -+ } - new_window = realloc(rar->lzss.window, new_size); - if (new_window == NULL) { - archive_set_error(&a->archive, ENOMEM, diff --git a/libs/libarchive/patches/104-CVE-2019-1000019.patch b/libs/libarchive/patches/104-CVE-2019-1000019.patch deleted file mode 100644 index a7df5a2c0..000000000 --- a/libs/libarchive/patches/104-CVE-2019-1000019.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 65a23f5dbee4497064e9bb467f81138a62b0dae1 Mon Sep 17 00:00:00 2001 -From: Daniel Axtens <dja@axtens.net> -Date: Tue, 1 Jan 2019 16:01:40 +1100 -Subject: [PATCH] 7zip: fix crash when parsing certain archives - -Fuzzing with CRCs disabled revealed that a call to get_uncompressed_data() -would sometimes fail to return at least 'minimum' bytes. This can cause -the crc32() invocation in header_bytes to read off into invalid memory. - -A specially crafted archive can use this to cause a crash. - -An ASAN trace is below, but ASAN is not required - an uninstrumented -binary will also crash. - -==7719==ERROR: AddressSanitizer: SEGV on unknown address 0x631000040000 (pc 0x7fbdb3b3ec1d bp 0x7ffe77a51310 sp 0x7ffe77a51150 T0) -==7719==The signal is caused by a READ memory access. - #0 0x7fbdb3b3ec1c in crc32_z (/lib/x86_64-linux-gnu/libz.so.1+0x2c1c) - #1 0x84f5eb in header_bytes (/tmp/libarchive/bsdtar+0x84f5eb) - #2 0x856156 in read_Header (/tmp/libarchive/bsdtar+0x856156) - #3 0x84e134 in slurp_central_directory (/tmp/libarchive/bsdtar+0x84e134) - #4 0x849690 in archive_read_format_7zip_read_header (/tmp/libarchive/bsdtar+0x849690) - #5 0x5713b7 in _archive_read_next_header2 (/tmp/libarchive/bsdtar+0x5713b7) - #6 0x570e63 in _archive_read_next_header (/tmp/libarchive/bsdtar+0x570e63) - #7 0x6f08bd in archive_read_next_header (/tmp/libarchive/bsdtar+0x6f08bd) - #8 0x52373f in read_archive (/tmp/libarchive/bsdtar+0x52373f) - #9 0x5257be in tar_mode_x (/tmp/libarchive/bsdtar+0x5257be) - #10 0x51daeb in main (/tmp/libarchive/bsdtar+0x51daeb) - #11 0x7fbdb27cab96 in __libc_start_main /build/glibc-OTsEL5/glibc-2.27/csu/../csu/libc-start.c:310 - #12 0x41dd09 in _start (/tmp/libarchive/bsdtar+0x41dd09) - -This was primarly done with afl and FairFuzz. Some early corpus entries -may have been generated by qsym. ---- - libarchive/archive_read_support_format_7zip.c | 8 +------- - 1 file changed, 1 insertion(+), 7 deletions(-) - -diff --git a/libarchive/archive_read_support_format_7zip.c b/libarchive/archive_read_support_format_7zip.c -index bccbf8966..b6d1505d3 100644 ---- a/libarchive/archive_read_support_format_7zip.c -+++ b/libarchive/archive_read_support_format_7zip.c -@@ -2964,13 +2964,7 @@ get_uncompressed_data(struct archive_read *a, const void **buff, size_t size, - if (zip->codec == _7Z_COPY && zip->codec2 == (unsigned long)-1) { - /* Copy mode. */ - -- /* -- * Note: '1' here is a performance optimization. 
-- * Recall that the decompression layer returns a count of -- * available bytes; asking for more than that forces the -- * decompressor to combine reads by copying data. -- */ -- *buff = __archive_read_ahead(a, 1, &bytes_avail); -+ *buff = __archive_read_ahead(a, minimum, &bytes_avail); - if (bytes_avail <= 0) { - archive_set_error(&a->archive, - ARCHIVE_ERRNO_FILE_FORMAT, diff --git a/libs/libarchive/patches/105-CVE-2019-1000020.patch b/libs/libarchive/patches/105-CVE-2019-1000020.patch deleted file mode 100644 index 86bbd9d13..000000000 --- a/libs/libarchive/patches/105-CVE-2019-1000020.patch +++ /dev/null @@ -1,55 +0,0 @@ -From 8312eaa576014cd9b965012af51bc1f967b12423 Mon Sep 17 00:00:00 2001 -From: Daniel Axtens <dja@axtens.net> -Date: Tue, 1 Jan 2019 17:10:49 +1100 -Subject: [PATCH] iso9660: Fail when expected Rockridge extensions is missing - -A corrupted or malicious ISO9660 image can cause read_CE() to loop -forever. - -read_CE() calls parse_rockridge(), expecting a Rockridge extension -to be read. However, parse_rockridge() is structured as a while -loop starting with a sanity check, and if the sanity check fails -before the loop has run, the function returns ARCHIVE_OK without -advancing the position in the file. This causes read_CE() to retry -indefinitely. - -Make parse_rockridge() return ARCHIVE_WARN if it didn't read an -extension. As someone with no real knowledge of the format, this -seems more apt than ARCHIVE_FATAL, but both the call-sites escalate -it to a fatal error immediately anyway. - -Found with a combination of AFL, afl-rb (FairFuzz) and qsym. ---- - libarchive/archive_read_support_format_iso9660.c | 11 ++++++++++- - 1 file changed, 10 insertions(+), 1 deletion(-) - -diff --git a/libarchive/archive_read_support_format_iso9660.c b/libarchive/archive_read_support_format_iso9660.c -index 28acfefbb..bad8f1dfe 100644 ---- a/libarchive/archive_read_support_format_iso9660.c -+++ b/libarchive/archive_read_support_format_iso9660.c -@@ -2102,6 +2102,7 @@ parse_rockridge(struct archive_read *a, struct file_info *file, - const unsigned char *p, const unsigned char *end) - { - struct iso9660 *iso9660; -+ int entry_seen = 0; - - iso9660 = (struct iso9660 *)(a->format->data); - -@@ -2257,8 +2258,16 @@ parse_rockridge(struct archive_read *a, struct file_info *file, - } - - p += p[2]; -+ entry_seen = 1; -+ } -+ -+ if (entry_seen) -+ return (ARCHIVE_OK); -+ else { -+ archive_set_error(&a->archive, ARCHIVE_ERRNO_FILE_FORMAT, -+ "Tried to parse Rockridge extensions, but none found"); -+ return (ARCHIVE_WARN); - } -- return (ARCHIVE_OK); - } - - static int diff --git a/libs/libgd/Makefile b/libs/libgd/Makefile index 8d4fc08a5..75593183a 100644 --- a/libs/libgd/Makefile +++ b/libs/libgd/Makefile @@ -9,16 +9,17 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libgd PKG_VERSION:=2.2.5 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=https://github.com/$(PKG_NAME)/$(PKG_NAME)/releases/download/gd-$(PKG_VERSION)/ PKG_HASH:=8c302ccbf467faec732f0741a859eef4ecae22fea2d2ab87467be940842bde51 + PKG_MAINTAINER:=Jo-Philipp Wich <jo@mein.io> PKG_LICENSE:=MIT +PKG_LICENSE_FILES:=COPYING PKG_FIXUP:=autoreconf - PKG_INSTALL:=1 PKG_BUILD_PARALLEL:=1 @@ -29,7 +30,7 @@ define Package/libgd CATEGORY:=Libraries DEPENDS:=+libjpeg +libpng +LIBGD_TIFF:libtiff +LIBGD_FREETYPE:libfreetype TITLE:=The GD graphics library - URL:=http://www.libgd.org/ + URL:=https://libgd.github.io/ MENU:=1 endef @@ -84,7 +85,9 @@ else endif CONFIGURE_VARS += \ - ac_cv_header_iconv_h=no + 
ac_cv_header_iconv_h=no \ + am_cv_func_iconv_works=no \ + am_func_iconv=no define Build/InstallDev $(INSTALL_DIR) $(1)/usr/bin diff --git a/libs/libglog/Makefile b/libs/libglog/Makefile index 83ff65cb7..f70b7a1cf 100644 --- a/libs/libglog/Makefile +++ b/libs/libglog/Makefile @@ -2,31 +2,29 @@ include $(TOPDIR)/rules.mk PKG_NAME:=glog PKG_VERSION:=0.4.0 -PKG_RELEASE:=3 +PKG_RELEASE:=4 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/google/glog/tar.gz/v$(PKG_VERSION)? PKG_HASH:=f28359aeba12f30d73d9e4711ef356dc842886968112162bc73002645139c39c +PKG_MAINTAINER:=Amir Sabbaghi <asaba90@gmail.com> PKG_LICENSE:=BSD-3-Clause PKG_LICENSE_FILES:=COPYING PKG_FIXUP:=autoreconf - PKG_BUILD_PARALLEL:=1 -PKG_INSTALL:=1 - -PKG_BUILD_DEPENDS:=libgflags +CMAKE_INSTALL:=1 include $(INCLUDE_DIR)/package.mk +include $(INCLUDE_DIR)/cmake.mk define Package/glog SECTION:=libs CATEGORY:=Libraries TITLE:=C++ implementation of the Google logging module - DEPENDS:= +libstdcpp +libpthread +gflags + DEPENDS:=+libstdcpp +libpthread +gflags URL:=https://github.com/google/glog - MAINTAINER:=Amir Sabbaghi <amir@pichak.co> endef define Package/glog/description @@ -34,23 +32,13 @@ define Package/glog/description module. Documentation for the implementation is in doc/. endef -CONFIGURE_VARS+=ac_cv_header_libunwind_h=0 +CMAKE_OPTIONS += \ + -DBUILD_SHARED_LIBS=ON \ + -DBUILD_TESTING=OFF \ + -DUNWIND_LIBRARY=OFF -TARGET_CXXFLAGS+=-std=c++11 -TARGET_LDFLAGS+=-lpthread - -define Build/Configure - $(call Build/Configure/Default,) -endef - -define Build/InstallDev - $(INSTALL_DIR) $(1)/usr/include/glog - $(CP) $(PKG_INSTALL_DIR)/usr/include/glog/*.h $(1)/usr/include/glog - $(INSTALL_DIR) $(1)/usr/lib - $(CP) $(PKG_INSTALL_DIR)/usr/lib/libglog.{a,so*} $(1)/usr/lib - $(INSTALL_DIR) $(1)/usr/lib/pkgconfig - $(CP) $(PKG_INSTALL_DIR)/usr/lib/pkgconfig/libglog.pc $(1)/usr/lib/pkgconfig/ -endef +TARGET_CFLAGS += -ffunction-sections -fdata-sections -flto +TARGET_CFLAGS += -Wl,--gc-sections define Package/glog/install $(INSTALL_DIR) $(1)/usr/lib diff --git a/libs/libglog/patches/110-nanosleep.patch b/libs/libglog/patches/110-nanosleep.patch new file mode 100644 index 000000000..8d1a2ffa0 --- /dev/null +++ b/libs/libglog/patches/110-nanosleep.patch @@ -0,0 +1,24 @@ +From d7b02b6929baf5b21ee6e15a700b4fc82d962e9c Mon Sep 17 00:00:00 2001 +From: Rosen Penev <rosenp@gmail.com> +Date: Thu, 25 Jul 2019 19:14:42 -0700 +Subject: [PATCH] googletest: Switch to nanosleep + +usleep is deprecated and optionally not available with uClibc-ng. +--- + src/googletest.h | 3 ++- + 1 file changed, 2 insertions(+), 1 deletion(-) + +diff --git a/src/googletest.h b/src/googletest.h +index 49ddbc0a..39fb29fb 100644 +--- a/src/googletest.h ++++ b/src/googletest.h +@@ -574,7 +574,8 @@ class Thread { + + static inline void SleepForMilliseconds(int t) { + #ifndef OS_WINDOWS +- usleep(t * 1000); ++ const struct timespec req = {0, t * 1000 * 1000}; ++ nanosleep(&req, NULL); + #else + Sleep(t); + #endif diff --git a/libs/libgpiod/Makefile b/libs/libgpiod/Makefile index d710f59cd..ce393cdb4 100644 --- a/libs/libgpiod/Makefile +++ b/libs/libgpiod/Makefile @@ -1,5 +1,5 @@ # -# Copyright (C) 2018 Michael Heimpold <mhei@heimpold.de> +# Copyright (C) 2018-2019 Michael Heimpold <mhei@heimpold.de> # # This is free software, licensed under the GNU General Public License v2. # See /LICENSE for more information. 
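For illustration only (not part of any patch in this commit): the usleep()-to-nanosleep() replacement made by the libglog 110-nanosleep.patch above can be written as a free-standing helper roughly as follows; the name sleep_for_milliseconds is made up here, and unlike the one-line patch this sketch also splits out whole seconds so tv_nsec stays below one second for long delays.

    #include <time.h>   /* nanosleep, struct timespec */

    /* Hypothetical helper: sleep for 't' milliseconds using nanosleep(),
     * which POSIX.1-2008 keeps while usleep() is obsolescent and can be
     * compiled out of uClibc-ng. */
    static void sleep_for_milliseconds(int t)
    {
        struct timespec req;
        req.tv_sec  = t / 1000;                     /* whole seconds */
        req.tv_nsec = (long)(t % 1000) * 1000000L;  /* remainder in nanoseconds */
        nanosleep(&req, NULL);                      /* EINTR handling omitted */
    }

usleep() was marked obsolescent in POSIX.1-2001 and removed in POSIX.1-2008, which is why uClibc-ng can be configured without it.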
@@ -8,14 +8,14 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libgpiod -PKG_VERSION:=1.3 +PKG_VERSION:=1.4 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=@KERNEL/software/libs/libgpiod/ -PKG_HASH:=6ec837f23e8f2196e5976dec4ac81403170830075e7f33ede1394eaf67f2e962 +PKG_HASH:=ebde83aaf14be3abd33e7a90faa487a2ee231e242897afe7fdefb765386b3c8b -PKG_LICENSE:=LGPL-2.1+ +PKG_LICENSE:=LGPL-2.1-or-later PKG_LICENSE_FILES:=COPYING PKG_MAINTAINER:=Michael Heimpold <mhei@heimpold.de> diff --git a/libs/libhttp-parser/Makefile b/libs/libhttp-parser/Makefile index 9e9a85ff3..8743c379d 100644 --- a/libs/libhttp-parser/Makefile +++ b/libs/libhttp-parser/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libhttp-parser PKG_VERSION:=2.9.2 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/nodejs/http-parser/tar.gz/v$(PKG_VERSION)? @@ -41,18 +41,19 @@ define Package/libhttp-parser/description (in a web server that is per connection). endef -MAKE_FLAGS+=library +MAKE_FLAGS+=library \ + PREFIX=/usr define Build/InstallDev $(INSTALL_DIR) $(1)/usr/include - $(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/local/include/http_parser.h $(1)/usr/include/ + $(INSTALL_DATA) $(PKG_INSTALL_DIR)/usr/include/http_parser.h $(1)/usr/include/ $(INSTALL_DIR) $(1)/usr/lib - $(CP) $(PKG_INSTALL_DIR)/usr/local/lib/libhttp_parser.so* $(1)/usr/lib/ + $(CP) $(PKG_INSTALL_DIR)/usr/lib/libhttp_parser.so* $(1)/usr/lib/ endef define Package/libhttp-parser/install $(INSTALL_DIR) $(1)/usr/lib - $(CP) $(PKG_INSTALL_DIR)/usr/local/lib/libhttp_parser.so* $(1)/usr/lib/ + $(CP) $(PKG_INSTALL_DIR)/usr/lib/libhttp_parser.so* $(1)/usr/lib/ endef $(eval $(call BuildPackage,libhttp-parser)) diff --git a/libs/libhttp-parser/patches/000-fix_darwin_error.patch b/libs/libhttp-parser/patches/000-fix_darwin_error.patch new file mode 100644 index 000000000..6937b5e58 --- /dev/null +++ b/libs/libhttp-parser/patches/000-fix_darwin_error.patch @@ -0,0 +1,51 @@ +--- a/Makefile ++++ b/Makefile +@@ -25,11 +25,7 @@ + SOMAJOR = 2 + SOMINOR = 9 + SOREV = 2 +-ifeq (darwin,$(PLATFORM)) +-SOEXT ?= dylib +-SONAME ?= $(SOLIBNAME).$(SOMAJOR).$(SOMINOR).$(SOEXT) +-LIBNAME ?= $(SOLIBNAME).$(SOMAJOR).$(SOMINOR).$(SOREV).$(SOEXT) +-else ifeq (wine,$(PLATFORM)) ++ifeq (wine,$(PLATFORM)) + CC = winegcc + BINEXT = .exe.so + HELPER = wine +@@ -65,12 +61,8 @@ + LIBDIR = $(PREFIX)/lib + INCLUDEDIR = $(PREFIX)/include + +-ifeq (darwin,$(PLATFORM)) +-LDFLAGS_LIB += -Wl,-install_name,$(LIBDIR)/$(SONAME) +-else + # TODO(bnoordhuis) The native SunOS linker expects -h rather than -soname... 
+ LDFLAGS_LIB += -Wl,-soname=$(SONAME) +-endif + + test: test_g test_fast + $(HELPER) ./test_g$(BINEXT) +@@ -131,14 +123,18 @@ + ctags $^ + + install: library +- $(INSTALL) -D http_parser.h $(DESTDIR)$(INCLUDEDIR)/http_parser.h +- $(INSTALL) -D $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(LIBNAME) ++ $(INSTALL) -d $(DESTDIR)$(INCLUDEDIR) ++ $(INSTALL) -d $(DESTDIR)$(LIBDIR) ++ $(INSTALL) http_parser.h $(DESTDIR)$(INCLUDEDIR)/http_parser.h ++ $(INSTALL) $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(LIBNAME) + ln -s $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SONAME) + ln -s $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SOLIBNAME).$(SOEXT) + + install-strip: library +- $(INSTALL) -D http_parser.h $(DESTDIR)$(INCLUDEDIR)/http_parser.h +- $(INSTALL) -D -s $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(LIBNAME) ++ $(INSTALL) -d $(DESTDIR)$(INCLUDEDIR) ++ $(INSTALL) -d $(DESTDIR)$(LIBDIR) ++ $(INSTALL) http_parser.h $(DESTDIR)$(INCLUDEDIR)/http_parser.h ++ $(INSTALL) -s $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(LIBNAME) + ln -s $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SONAME) + ln -s $(LIBNAME) $(DESTDIR)$(LIBDIR)/$(SOLIBNAME).$(SOEXT) + diff --git a/libs/liblz4/Makefile b/libs/liblz4/Makefile index dd4c848e7..e6c4d61d6 100644 --- a/libs/liblz4/Makefile +++ b/libs/liblz4/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=liblz4 PKG_VERSION:=1.9.1 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/lz4/lz4/tar.gz/v$(PKG_VERSION)? diff --git a/libs/liblz4/patches/010-utime.patch b/libs/liblz4/patches/010-utime.patch new file mode 100644 index 000000000..6c21c332b --- /dev/null +++ b/libs/liblz4/patches/010-utime.patch @@ -0,0 +1,72 @@ +From e9d5a3cbbb47eb0f785a409d836225b592b250f3 Mon Sep 17 00:00:00 2001 +From: Rosen Penev <rosenp@gmail.com> +Date: Tue, 30 Jul 2019 22:13:51 -0700 +Subject: [PATCH] util.h: Remove deprecated utime for non-Windows + +utime was deprecated in POSIX 2008. 
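For illustration only (not code from the lz4 or flac patches): both apply the same utime()-to-utimensat() conversion, which can be sketched as a standalone helper like the one below, assuming _POSIX_C_SOURCE >= 200809L; the name copy_file_times is made up here.

    #include <fcntl.h>     /* AT_FDCWD */
    #include <sys/stat.h>  /* stat, utimensat, struct timespec */

    /* Hypothetical helper: copy atime/mtime from 'src' to 'dst' without the
     * deprecated utime()/struct utimbuf.  times[0] is the access time,
     * times[1] the modification time. */
    static int copy_file_times(const char *src, const char *dst)
    {
        struct stat st;
        struct timespec times[2];

        if (stat(src, &st) != 0)
            return -1;

        times[0].tv_sec  = st.st_atime;  /* whole seconds; nanoseconds left at 0 */
        times[0].tv_nsec = 0;
        times[1].tv_sec  = st.st_mtime;
        times[1].tv_nsec = 0;

        return utimensat(AT_FDCWD, dst, times, 0);
    }

The struct timespec pair replaces struct utimbuf, adds nanosecond resolution, and accepts the special UTIME_NOW/UTIME_OMIT values that the lz4 patch uses for the access time.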
+--- + programs/platform.h | 2 +- + programs/util.h | 17 ++++++++++++++++- + 2 files changed, 17 insertions(+), 2 deletions(-) + +diff --git a/programs/platform.h b/programs/platform.h +index c0b38402..7e2cb58f 100644 +--- a/programs/platform.h ++++ b/programs/platform.h +@@ -86,7 +86,7 @@ extern "C" { + # else + # if defined(__linux__) || defined(__linux) + # ifndef _POSIX_C_SOURCE +-# define _POSIX_C_SOURCE 200112L /* use feature test macro */ ++# define _POSIX_C_SOURCE 200809L /* use feature test macro */ + # endif + # endif + # include <unistd.h> /* declares _POSIX_VERSION */ +diff --git a/programs/util.h b/programs/util.h +index 1dd515ce..112dddbf 100644 +--- a/programs/util.h ++++ b/programs/util.h +@@ -37,12 +37,17 @@ extern "C" { + #include <assert.h> + #include <sys/types.h> /* stat, utime */ + #include <sys/stat.h> /* stat */ +-#if defined(_MSC_VER) ++#if defined(_WIN32) + # include <sys/utime.h> /* utime */ + # include <io.h> /* _chmod */ + #else + # include <unistd.h> /* chown, stat */ ++#if PLATFORM_POSIX_VERSION < 200809L + # include <utime.h> /* utime */ ++#else ++# include <fcntl.h> /* AT_FDCWD */ ++# include <sys/stat.h> /* for utimensat */ ++#endif + #endif + #include <time.h> /* time */ + #include <limits.h> /* INT_MAX */ +@@ -287,14 +292,24 @@ UTIL_STATIC int UTIL_isRegFile(const char* infilename); + UTIL_STATIC int UTIL_setFileStat(const char *filename, stat_t *statbuf) + { + int res = 0; ++#if defined(_WIN32) || (PLATFORM_POSIX_VERSION < 200809L) + struct utimbuf timebuf; ++#else ++ struct timespec timebuf[2] = {}; ++#endif + + if (!UTIL_isRegFile(filename)) + return -1; + ++#if defined(_WIN32) || (PLATFORM_POSIX_VERSION < 200809L) + timebuf.actime = time(NULL); + timebuf.modtime = statbuf->st_mtime; + res += utime(filename, &timebuf); /* set access and modification times */ ++#else ++ timebuf[0].tv_nsec = UTIME_NOW; ++ timebuf[1].tv_sec = statbuf->st_mtime; ++ res += utimensat(AT_FDCWD, filename, timebuf, 0); /* set access and modification times */ ++#endif + + #if !defined(_WIN32) + res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */ diff --git a/libs/libredblack/Makefile b/libs/libredblack/Makefile index 75a616a0a..dcc527eb1 100644 --- a/libs/libredblack/Makefile +++ b/libs/libredblack/Makefile @@ -8,23 +8,17 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libredblack -PKG_VERSION:=0.2.3 -PKG_RELEASE=$(PKG_SOURCE_VERSION) +PKG_VERSION:=1.3 +PKG_RELEASE:=1 -PKG_LICENSE:=GPL-2.0+ -PKG_MAINTAINER:=Mislav Novakovic <mislav.novakovic@sartura.hr> - -PKG_SOURCE_PROTO:=git -PKG_SOURCE_VERSION:=a399310d99b61eec4d3c0677573ab5dddcf9395d -PKG_MIRROR_HASH:=71b05e70988b97865f734c698dd5564e349680556ccb8634a5bddf344012f22a -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.xz -PKG_SOURCE_URL:=https://github.com/sysrepo/libredblack.git -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION) +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_SOURCE_URL:=@SF/libredblack +PKG_HASH:=a0ecc59b0aae2df01558a6950532c711a782a099277b439a51d270003092f44f -PKG_BUILD_ROOT:=$(BUILD_DIR)/$(PKG_SOURCE_SUBDIR) -PKG_BUILD_DIR:=$(PKG_BUILD_ROOT) +PKG_MAINTAINER:=Mislav Novakovic <mislav.novakovic@sartura.hr> +PKG_LICENSE:=GPL-2.0-or-later +PKG_LICENSE_FILES:=COPYING -PKG_FIXUP:=autoreconf PKG_BUILD_PARALLEL:=1 PKG_INSTALL:=1 @@ -35,13 +29,15 @@ define Package/libredblack SECTION:=libs CATEGORY:=Libraries TITLE:=RedBlack tree library - URL:=$(PKG_SOURCE_URL) + URL:=http://libredblack.sourceforge.net/ endef define Package/libredblack/description RedBlack 
Balanced Tree Searching and Sorting Library. endef +CONFIGURE_ARGS += --without-rbgen + define Build/InstallDev $(INSTALL_DIR) $(1)/usr/lib $(CP) $(PKG_INSTALL_DIR)/usr/lib/libredblack.{so*,a,la} $(1)/usr/lib/ diff --git a/libs/libsigar/Makefile b/libs/libsigar/Makefile index 7a332e6da..39f22138e 100644 --- a/libs/libsigar/Makefile +++ b/libs/libsigar/Makefile @@ -3,13 +3,13 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libsigar PKG_SOURCE_DATE:=2017-02-21 PKG_SOURCE_VERSION:=a6c61edf8c64e013411e8c9d753165cd03102c6e -PKG_RELEASE:=1 +PKG_RELEASE:=3 -PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_DATE).tar.gz -PKG_SOURCE_URL:=https://codeload.github.com/boundary/sigar/tar.gz/$(PKG_SOURCE_VERSION)? -PKG_HASH:=5232f0fa994ab60ad4622364fad0297c0054e04f0cfec9c586b14e33bbc387da -PKG_BUILD_DIR:=$(BUILD_DIR)/sigar-$(PKG_SOURCE_VERSION) +PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/boundary/sigar +PKG_MIRROR_HASH:=5f017e10ab1d929c9dfb2937fef16a45962b60958cd1569573d18f00fcea290f +PKG_MAINTAINER:=Amol Bhave <ambhave@fb.com> PKG_LICENSE:=Apache-2.0 PKG_LICENSE_FILE:=LICENSE @@ -25,7 +25,6 @@ define Package/libsigar TITLE:=System Information Gatherer And Reporter URL:=https://github.com/boundary/sigar DEPENDS:=+libtirpc - MAINTAINER:=Amol Bhave <ambhave@fb.com> endef define Package/libsigar/description diff --git a/libs/libsigar/patches/010-rindex.patch b/libs/libsigar/patches/010-rindex.patch new file mode 100644 index 000000000..bc0b064a3 --- /dev/null +++ b/libs/libsigar/patches/010-rindex.patch @@ -0,0 +1,18 @@ +--- a/src/sigar_util.c ++++ b/src/sigar_util.c +@@ -954,14 +954,10 @@ int sigar_file2str(const char *fname, char *buffer, int buflen) + #define vsnprintf _vsnprintf + #endif + +-#ifdef WIN32 +-# define rindex strrchr +-#endif +- + static int proc_module_get_self(void *data, char *name, int len) + { + sigar_t *sigar = (sigar_t *)data; +- char *ptr = rindex(name, '/'); ++ char *ptr = strrchr(name, '/'); + + if (!ptr) { + return SIGAR_OK; diff --git a/libs/libsigar/patches/020-sysmacros.patch b/libs/libsigar/patches/020-sysmacros.patch new file mode 100644 index 000000000..334a908a3 --- /dev/null +++ b/libs/libsigar/patches/020-sysmacros.patch @@ -0,0 +1,10 @@ +--- a/src/os/linux/linux_sigar.c ++++ b/src/os/linux/linux_sigar.c +@@ -23,6 +23,7 @@ + #include <linux/param.h> + #include <sys/param.h> + #include <sys/stat.h> ++#include <sys/sysmacros.h> + #include <sys/times.h> + #include <sys/utsname.h> + #include <mntent.h> diff --git a/libs/libtasn1/Makefile b/libs/libtasn1/Makefile index e20fcada5..039580ae4 100644 --- a/libs/libtasn1/Makefile +++ b/libs/libtasn1/Makefile @@ -8,19 +8,20 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libtasn1 -PKG_VERSION:=4.13 -PKG_RELEASE:=2 +PKG_VERSION:=4.14 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=@GNU/$(PKG_NAME) -PKG_HASH:=7e528e8c317ddd156230c4e31d082cd13e7ddeb7a54824be82632209550c8cca +PKG_HASH:=9e604ba5c5c8ea403487695c2e407405820d98540d9de884d6e844f9a9c5ba08 PKG_MAINTAINER:=Nikos Mavrogiannopoulos <n.mavrogiannopoulos@gmail.com> -PKG_LICENSE:=LGPLv2.1+ +PKG_LICENSE:=LGPLv2.1-or-later PKG_LICENSE_FILES:=COPYING.LIB #PKG_FIXUP:=autoreconf PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 include $(INCLUDE_DIR)/package.mk @@ -36,6 +37,7 @@ define Package/libtasn1/description Distinguish Encoding Rules (DER) manipulation. 
endef +TARGET_CFLAGS += -ffunction-sections -fdata-sections TARGET_LDFLAGS += -Wl,--gc-sections CONFIGURE_ARGS += \ diff --git a/libs/libtirpc/Makefile b/libs/libtirpc/Makefile index d7c8cd25b..333520edf 100644 --- a/libs/libtirpc/Makefile +++ b/libs/libtirpc/Makefile @@ -1,4 +1,4 @@ -# +# # Copyright (C) 2006-2018 OpenWrt.org # # This is free software, licensed under the GNU General Public License v2. @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libtirpc PKG_VERSION:=1.1.4 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE_URL:=@SF/libtirpc PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 diff --git a/libs/libtirpc/patches/010-b-functions.patch b/libs/libtirpc/patches/010-b-functions.patch new file mode 100644 index 000000000..89e882ac6 --- /dev/null +++ b/libs/libtirpc/patches/010-b-functions.patch @@ -0,0 +1,66 @@ +--- a/src/auth_des.c ++++ b/src/auth_des.c +@@ -396,7 +396,7 @@ authdes_validate(AUTH *auth, struct opaque_auth *rverf) + /* + * validate + */ +- if (bcmp((char *)&ad->ad_timestamp, (char *)&verf.adv_timestamp, ++ if (memcmp((char *)&ad->ad_timestamp, (char *)&verf.adv_timestamp, + sizeof(struct timeval)) != 0) { + LIBTIRPC_DEBUG(1, ("authdes_validate: verifier mismatch")); + return (FALSE); +--- a/src/auth_time.c ++++ b/src/auth_time.c +@@ -104,7 +104,7 @@ static int uaddr_to_sockaddr(uaddr, sin) + p_bytes[1] = (unsigned char)a[5] & 0x000000FF; + + sin->sin_family = AF_INET; /* always */ +- bcopy((char *)&p_bytes, (char *)&sin->sin_port, 2); ++ memcpy((char *)&sin->sin_port, (char *)&p_bytes, 2); + + return (0); + } +--- a/src/crypt_client.c ++++ b/src/crypt_client.c +@@ -75,8 +75,8 @@ _des_crypt_call(buf, len, dparms) + des_crypt_1_arg.desbuf.desbuf_val = buf; + des_crypt_1_arg.des_dir = dparms->des_dir; + des_crypt_1_arg.des_mode = dparms->des_mode; +- bcopy(dparms->des_ivec, des_crypt_1_arg.des_ivec, 8); +- bcopy(dparms->des_key, des_crypt_1_arg.des_key, 8); ++ memcpy(des_crypt_1_arg.des_ivec, dparms->des_ivec, 8); ++ memcpy(des_crypt_1_arg.des_key, dparms->des_key, 8); + + result_1 = des_crypt_1(&des_crypt_1_arg, clnt); + if (result_1 == (desresp *) NULL) { +@@ -88,8 +88,8 @@ _des_crypt_call(buf, len, dparms) + + if (result_1->stat == DESERR_NONE || + result_1->stat == DESERR_NOHWDEVICE) { +- bcopy(result_1->desbuf.desbuf_val, buf, len); +- bcopy(result_1->des_ivec, dparms->des_ivec, 8); ++ memcpy(buf, result_1->desbuf.desbuf_val, len); ++ memcpy(dparms->des_ivec, result_1->des_ivec, 8); + } + + clnt_freeres(clnt, (xdrproc_t)xdr_desresp, result_1); +--- a/src/svc_auth_des.c ++++ b/src/svc_auth_des.c +@@ -145,7 +145,7 @@ _svcauth_des(rqst, msg) + return (AUTH_BADCRED); + } + cred->adc_fullname.name = area->area_netname; +- bcopy((char *)ixdr, cred->adc_fullname.name, ++ memcpy(cred->adc_fullname.name, (char *)ixdr, + (u_int)namelen); + cred->adc_fullname.name[namelen] = 0; + ixdr += (RNDUP(namelen) / BYTES_PER_XDR_UNIT); +@@ -419,7 +419,7 @@ cache_spot(key, name, timestamp) + if (cp->key.key.high == hi && + cp->key.key.low == key->key.low && + cp->rname != NULL && +- bcmp(cp->rname, name, strlen(name) + 1) == 0) { ++ memcmp(cp->rname, name, strlen(name) + 1) == 0) { + if (BEFORE(timestamp, &cp->laststamp)) { + svcauthdes_stats.ncachereplays++; + return (-1); /* replay */ diff --git a/libs/libupnpp/Makefile b/libs/libupnpp/Makefile index 8848404ad..fa788c10e 100644 --- a/libs/libupnpp/Makefile +++ b/libs/libupnpp/Makefile @@ -8,18 +8,19 @@ include $(TOPDIR)/rules.mk PKG_NAME:=libupnpp -PKG_VERSION:=0.17.0 +PKG_VERSION:=0.17.1 PKG_RELEASE:=1 
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.lesbonscomptes.com/upmpdcli/downloads -PKG_HASH:=7035dda48207c254cbd8cd64e4e679a9e5f085a35d28c19bc2ddeba0deaff58b +PKG_HASH:=90403b55583e932a9a04905a01bf452016a56aecbeade5c9e1454a5fbb6f01b0 + PKG_MAINTAINER:=Petko Bordjukov <bordjukov@gmail.com> -PKG_LICENSE:=GPL-2.0 +PKG_LICENSE:=LGPL-2.1-or-later PKG_LICENSE_FILES:=COPYING -PKG_FIXUP:=autoreconf PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 include $(INCLUDE_DIR)/package.mk diff --git a/libs/lmdb/Makefile b/libs/lmdb/Makefile index 0e1fc0df7..acb77ad5b 100644 --- a/libs/lmdb/Makefile +++ b/libs/lmdb/Makefile @@ -1,12 +1,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=lmdb -PKG_VERSION:=0.9.23 +PKG_VERSION:=0.9.24 PKG_RELEASE:=1 PKG_SOURCE:=LMDB_$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/LMDB/lmdb/tar.gz/LMDB_$(PKG_VERSION)? -PKG_HASH:=abf42e91f046787ed642d9eb21812a5c473f3ba5854124484d16eadbe0aa9c81 +PKG_HASH:=44602436c52c29d4f301f55f6fd8115f945469b868348e3cddaf91ab2473ea26 PKG_BUILD_DIR:=$(BUILD_DIR)/lmdb-LMDB_$(PKG_VERSION) PKG_MAINTAINER:=Jan Pavlinec <jan.pavlinec@nic.cz> diff --git a/libs/oniguruma/Makefile b/libs/oniguruma/Makefile index ab92aff78..f782705b5 100644 --- a/libs/oniguruma/Makefile +++ b/libs/oniguruma/Makefile @@ -5,12 +5,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=oniguruma -PKG_VERSION:=6.9.2 -PKG_RELEASE:=2 +PKG_VERSION:=6.9.3 +PKG_RELEASE:=1 PKG_SOURCE:=onig-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/kkos/oniguruma/tar.gz/v$(PKG_VERSION)? -PKG_HASH:=3b568a9050839e7828b2f2d5bc9cd3650979b6b54a080f54c515320dddda06b0 +PKG_HASH:=dc6dec742941e24b761cea1b9a2f12e750879107ae69fd80ae1046459d4fb1db PKG_MAINTAINER:=Eneas U de Queiroz <cotequeiroz@gmail.com> PKG_LICENSE:=BSD-2-Clause diff --git a/libs/oniguruma/patches/001-Fix-CVE-2019-13225-problem-in-converting-if-then-els.patch b/libs/oniguruma/patches/001-Fix-CVE-2019-13225-problem-in-converting-if-then-els.patch deleted file mode 100644 index e7cf9d00a..000000000 --- a/libs/oniguruma/patches/001-Fix-CVE-2019-13225-problem-in-converting-if-then-els.patch +++ /dev/null @@ -1,66 +0,0 @@ -From c509265c5f6ae7264f7b8a8aae1cfa5fc59d108c Mon Sep 17 00:00:00 2001 -From: "K.Kosako" <kosako@sofnec.co.jp> -Date: Thu, 27 Jun 2019 14:11:55 +0900 -Subject: [PATCH] Fix CVE-2019-13225: problem in converting if-then-else - pattern to bytecode. 
- - -diff --git a/src/regcomp.c b/src/regcomp.c -index c2c04a4..ff3431f 100644 ---- a/src/regcomp.c -+++ b/src/regcomp.c -@@ -1307,8 +1307,9 @@ compile_length_bag_node(BagNode* node, regex_t* reg) - len += tlen; - } - -+ len += SIZE_OP_JUMP + SIZE_OP_ATOMIC_END; -+ - if (IS_NOT_NULL(Else)) { -- len += SIZE_OP_JUMP; - tlen = compile_length_tree(Else, reg); - if (tlen < 0) return tlen; - len += tlen; -@@ -1455,7 +1456,7 @@ compile_bag_node(BagNode* node, regex_t* reg, ScanEnv* env) - - case BAG_IF_ELSE: - { -- int cond_len, then_len, jump_len; -+ int cond_len, then_len, else_len, jump_len; - Node* cond = NODE_BAG_BODY(node); - Node* Then = node->te.Then; - Node* Else = node->te.Else; -@@ -1472,8 +1473,7 @@ compile_bag_node(BagNode* node, regex_t* reg, ScanEnv* env) - else - then_len = 0; - -- jump_len = cond_len + then_len + SIZE_OP_ATOMIC_END; -- if (IS_NOT_NULL(Else)) jump_len += SIZE_OP_JUMP; -+ jump_len = cond_len + then_len + SIZE_OP_ATOMIC_END + SIZE_OP_JUMP; - - r = add_op(reg, OP_PUSH); - if (r != 0) return r; -@@ -1490,11 +1490,20 @@ compile_bag_node(BagNode* node, regex_t* reg, ScanEnv* env) - } - - if (IS_NOT_NULL(Else)) { -- int else_len = compile_length_tree(Else, reg); -- r = add_op(reg, OP_JUMP); -- if (r != 0) return r; -- COP(reg)->jump.addr = else_len + SIZE_INC_OP; -+ else_len = compile_length_tree(Else, reg); -+ if (else_len < 0) return else_len; -+ } -+ else -+ else_len = 0; - -+ r = add_op(reg, OP_JUMP); -+ if (r != 0) return r; -+ COP(reg)->jump.addr = SIZE_OP_ATOMIC_END + else_len + SIZE_INC_OP; -+ -+ r = add_op(reg, OP_ATOMIC_END); -+ if (r != 0) return r; -+ -+ if (IS_NOT_NULL(Else)) { - r = compile_tree(Else, reg, env); - } - } diff --git a/libs/oniguruma/patches/002-Fix-CVE-2019-13224-don-t-allow-different-encodings-f.patch b/libs/oniguruma/patches/002-Fix-CVE-2019-13224-don-t-allow-different-encodings-f.patch deleted file mode 100644 index dd005bb75..000000000 --- a/libs/oniguruma/patches/002-Fix-CVE-2019-13224-don-t-allow-different-encodings-f.patch +++ /dev/null @@ -1,38 +0,0 @@ -From 0f7f61ed1b7b697e283e37bd2d731d0bd57adb55 Mon Sep 17 00:00:00 2001 -From: "K.Kosako" <kosako@sofnec.co.jp> -Date: Thu, 27 Jun 2019 17:25:26 +0900 -Subject: [PATCH] Fix CVE-2019-13224: don't allow different encodings for - onig_new_deluxe() - - -diff --git a/src/regext.c b/src/regext.c -index fa4b360..965c793 100644 ---- a/src/regext.c -+++ b/src/regext.c -@@ -29,6 +29,7 @@ - - #include "regint.h" - -+#if 0 - static void - conv_ext0be32(const UChar* s, const UChar* end, UChar* conv) - { -@@ -158,6 +159,7 @@ conv_encoding(OnigEncoding from, OnigEncoding to, const UChar* s, const UChar* e - - return ONIGERR_NOT_SUPPORTED_ENCODING_COMBINATION; - } -+#endif - - extern int - onig_new_deluxe(regex_t** reg, const UChar* pattern, const UChar* pattern_end, -@@ -169,9 +171,7 @@ onig_new_deluxe(regex_t** reg, const UChar* pattern, const UChar* pattern_end, - if (IS_NOT_NULL(einfo)) einfo->par = (UChar* )NULL; - - if (ci->pattern_enc != ci->target_enc) { -- r = conv_encoding(ci->pattern_enc, ci->target_enc, pattern, pattern_end, -- &cpat, &cpat_end); -- if (r != 0) return r; -+ return ONIGERR_NOT_SUPPORTED_ENCODING_COMBINATION; - } - else { - cpat = (UChar* )pattern; diff --git a/libs/openldap/Makefile b/libs/openldap/Makefile index f667d7670..d7db725ce 100644 --- a/libs/openldap/Makefile +++ b/libs/openldap/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=openldap PKG_VERSION:=2.4.47 -PKG_RELEASE:=2 +PKG_RELEASE:=3 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tgz 
PKG_SOURCE_URL:=https://gpl.savoirfairelinux.net/pub/mirrors/openldap/openldap-release/ \ diff --git a/libs/openldap/patches/800-implicit.patch b/libs/openldap/patches/800-implicit.patch new file mode 100644 index 000000000..6a39d78b4 --- /dev/null +++ b/libs/openldap/patches/800-implicit.patch @@ -0,0 +1,10 @@ +--- a/libraries/libldap/tls2.c ++++ b/libraries/libldap/tls2.c +@@ -41,6 +41,7 @@ static tls_impl *tls_imp = &ldap_int_tls_impl; + #define HAS_TLS( sb ) ber_sockbuf_ctrl( sb, LBER_SB_OPT_HAS_IO, \ + (void *)tls_imp->ti_sbio ) + ++static int ldap_pvt_tls_check_hostname( LDAP *ld, void *s, const char *name_in ); + #endif /* HAVE_TLS */ + + #ifdef LDAP_DEVEL diff --git a/libs/redis/Makefile b/libs/redis/Makefile new file mode 100644 index 000000000..399e5d55f --- /dev/null +++ b/libs/redis/Makefile @@ -0,0 +1,94 @@ +include $(TOPDIR)/rules.mk + +PKG_NAME:=redis +PKG_VERSION:=5.0.5 +PKG_RELEASE:=2 + +PKG_SOURCE_URL:=http://download.redis.io/releases/ +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_HASH:=2139009799d21d8ff94fc40b7f36ac46699b9e1254086299f8d3b223ca54a375 + +PKG_MAINTAINER:=Jan Pavlinec <jan.pavlinec@nic.cz> +PKG_LICENSE:=BSD-3-Clause +PKG_LICENSE_FILES:=COPYING +PKG_INSTALL:=1 + +include $(INCLUDE_DIR)/package.mk + +MAKE_FLAGS+= \ + MALLOC="libc" \ + USE_JEMALLOC="no" \ + PREFIX="$(PKG_INSTALL_DIR)/usr" \ + ARCH="" + +define Package/redis/Default + SUBMENU:=Database + SECTION:=libs + CATEGORY:=Libraries + URL:=https://redis.io +endef + +define Package/redis-server +$(call Package/redis/Default) + TITLE:=Redis server + DEPENDS:=+libpthread +endef + +define Package/redis-cli +$(call Package/redis/Default) + TITLE:=Redis cli +endef + +define Package/redis-utils +$(call Package/redis/Default) + TITLE:=Redis utilities + DEPENDS:=+redis-server +endef + +define Package/redis-full +$(call Package/redis/Default) + TITLE:=All Redis binaries (server,cli and utils) + DEPENDS:=+redis-utils +redis-cli +endef + +define Package/redis-full/description + Redis is an open source, BSD licensed, advanced key-value cache and store. + It is often referred to as a data structure server since keys can contain + strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. 
+endef + +define Package/redis-server/conffiles +/etc/redis.conf +endef + +define Build/Compile + $(MAKE) -C "$(PKG_BUILD_DIR)/deps/hiredis" static $(MAKE_FLAGS) $(MAKE_VARS) + $(MAKE) -C "$(PKG_BUILD_DIR)/deps/linenoise" $(MAKE_FLAGS) $(MAKE_VARS) + $(MAKE) -C "$(PKG_BUILD_DIR)/deps/lua" posix $(MAKE_FLAGS) $(MAKE_VARS) AR="${AR} ru" + $(call Build/Compile/Default) +endef + +define Package/redis-server/install + $(INSTALL_DIR) $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/redis-server $(1)/usr/bin/ + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) ./files/redis.init $(1)/etc/init.d/redis + $(INSTALL_DIR) $(1)/etc + $(INSTALL_DATA) $(PKG_BUILD_DIR)/redis.conf $(1)/etc/ + $(SED) "s|^dir .*|dir /var/lib/redis|" $(1)/etc/redis.conf +endef + +define Package/redis-cli/install + $(INSTALL_DIR) $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/redis-cli $(1)/usr/bin/ +endef + +define Package/redis-utils/install + $(INSTALL_DIR) $(1)/usr/bin + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/redis-{check-aof,benchmark} $(1)/usr/bin/ +endef + +$(eval $(call BuildPackage,redis-full)) +$(eval $(call BuildPackage,redis-server)) +$(eval $(call BuildPackage,redis-cli)) +$(eval $(call BuildPackage,redis-utils)) diff --git a/libs/redis/files/redis.init b/libs/redis/files/redis.init new file mode 100755 index 000000000..686514d79 --- /dev/null +++ b/libs/redis/files/redis.init @@ -0,0 +1,17 @@ +#!/bin/sh /etc/rc.common + +USE_PROCD=1 +START=95 +STOP=10 + +REDIS_BIN="/usr/bin/redis-server" +REDIS_CONFIG="/etc/redis.conf" +REDIS_PID="/var/run/redis.pid" +REDIS_DATA="/var/lib/redis" + +start_service() { + mkdir -p "$REDIS_DATA" + procd_open_instance redis + procd_set_param command "$REDIS_BIN" "$REDIS_CONFIG" + procd_close_instance +} diff --git a/libs/redis/patches/020-fix-atomicvar.patch b/libs/redis/patches/020-fix-atomicvar.patch new file mode 100644 index 000000000..ad6519e48 --- /dev/null +++ b/libs/redis/patches/020-fix-atomicvar.patch @@ -0,0 +1,22 @@ +Index: redis-5.0.0/src/atomicvar.h +=================================================================== +--- redis-5.0.0.orig/src/atomicvar.h ++++ redis-5.0.0/src/atomicvar.h +@@ -68,7 +68,7 @@ + * is reported. */ + // #define __ATOMIC_VAR_FORCE_SYNC_MACROS + +-#if !defined(__ATOMIC_VAR_FORCE_SYNC_MACROS) && defined(__ATOMIC_RELAXED) && !defined(__sun) && (!defined(__clang__) || !defined(__APPLE__) || __apple_build_version__ > 4210057) ++#if defined(CONFIG_EDAC_ATOMIC_SCRUB) && !defined(__ATOMIC_VAR_FORCE_SYNC_MACROS) && defined(__ATOMIC_RELAXED) && !defined(__sun) && (!defined(__clang__) || !defined(__APPLE__) || __apple_build_version__ > 4210057) + /* Implementation using __atomic macros. */ + + #define atomicIncr(var,count) __atomic_add_fetch(&var,(count),__ATOMIC_RELAXED) +@@ -82,7 +82,7 @@ + #define atomicSet(var,value) __atomic_store_n(&var,value,__ATOMIC_RELAXED) + #define REDIS_ATOMIC_API "atomic-builtin" + +-#elif defined(HAVE_ATOMIC) ++#elif defined(CONFIG_EDAC_ATOMIC_SCRUB) && defined(HAVE_ATOMIC) + /* Implementation using __sync macros. */ + + #define atomicIncr(var,count) __sync_add_and_fetch(&var,(count))
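Regarding the 020-fix-atomicvar.patch above: CONFIG_EDAC_ATOMIC_SCRUB is a kernel-side symbol that is never defined when building userspace Redis, so prefixing both preprocessor branches with it effectively disables the __atomic and __sync code paths and lets atomicvar.h fall through to its mutex-based fallback — presumably to avoid atomic-builtin/libatomic problems on some OpenWrt targets. For reference, a hedged sketch of what the disabled __atomic branch amounts to (a relaxed fetch-and-add on a plain variable; the variable names are made up, not Redis code):

<pre><code>
#include <stdio.h>

int main(void)
{
    long hits = 0;
    long snapshot;

    /* atomicIncr(hits, 1) in the __atomic branch is a relaxed add-and-fetch */
    __atomic_add_fetch(&hits, 1, __ATOMIC_RELAXED);

    /* atomicGet(hits, snapshot) is a relaxed atomic load into a local copy */
    __atomic_load(&hits, &snapshot, __ATOMIC_RELAXED);

    printf("hits = %ld\n", snapshot);
    return 0;
}
</code></pre>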
\ No newline at end of file diff --git a/libs/redis/patches/030-fix-uclibc-compilation.patch b/libs/redis/patches/030-fix-uclibc-compilation.patch new file mode 100644 index 000000000..0fc39b02d --- /dev/null +++ b/libs/redis/patches/030-fix-uclibc-compilation.patch @@ -0,0 +1,25 @@ +--- a/src/config.h ++++ b/src/config.h +@@ -30,6 +30,10 @@ + #ifndef __CONFIG_H + #define __CONFIG_H + ++#if defined(__unix) || defined(__linux__) ++#include <features.h> ++#endif ++ + #ifdef __APPLE__ + #include <AvailabilityMacros.h> + #endif +@@ -62,9 +66,9 @@ + #endif + + /* Test for backtrace() */ +-#if defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) || \ ++#if (defined(__APPLE__) || (defined(__linux__) && defined(__GLIBC__)) || \ + defined(__FreeBSD__) || (defined(__OpenBSD__) && defined(USE_BACKTRACE))\ +- || defined(__DragonFly__) ++ || defined(__DragonFly__)) && !defined(__UCLIBC__) + #define HAVE_BACKTRACE 1 + #endif + diff --git a/libs/yajl/Makefile b/libs/yajl/Makefile index 843b5967f..d3972542d 100644 --- a/libs/yajl/Makefile +++ b/libs/yajl/Makefile @@ -1,4 +1,4 @@ -# +# # Copyright (C) 2014, 2015 OpenWrt.org # # This is free software, licensed under the GNU General Public License v2. @@ -9,18 +9,19 @@ include $(TOPDIR)/rules.mk PKG_NAME:=yajl PKG_VERSION:=2.1.0 -PKG_RELEASE:=1 +PKG_RELEASE:=2 + +PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/lloyd/yajl +PKG_SOURCE_VERSION:=$(PKG_VERSION) +PKG_MIRROR_HASH:=0cd74320be0270a07931e42d2f14f87a8b3fb664ecb5db58b0e838886211ab1f + PKG_MAINTAINER:=Charles Southerland <charlie@stuphlabs.com> PKG_LICENSE:=ISC PKG_LICENSE_FILES:=COPYING -PKG_REV:=66cb08ca2ad8581080b626a75dfca266a890afb2 -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_MIRROR_HASH:=95bfdb37f864318fc3c2ee736a747d4902d279a88f361770c89e60ff5e1d6f63 -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) -PKG_SOURCE_VERSION:=$(PKG_REV) -PKG_SOURCE_URL:=git://github.com/lloyd/yajl.git -PKG_SOURCE_PROTO:=git +PKG_BUILD_PARALLEL:=1 +CMAKE_INSTALL:=1 include $(INCLUDE_DIR)/package.mk include $(INCLUDE_DIR)/cmake.mk @@ -29,7 +30,7 @@ define Package/yajl SECTION:=libs CATEGORY:=Libraries TITLE:=Yet Another JSON Library - URL:=http://lloyd.github.io/yajl + URL:=https://lloyd.github.io/yajl endef define Package/yajl/description @@ -40,18 +41,6 @@ YAJL is released under the ISC license. YAJL was created by Lloyd Hilaiel. endef -PKG_INSTALL:=1 - -CMAKE_OPTIONS += \ - -DCMAKE_BUILD_TYPE:String="Release" - -define Build/InstallDev - $(INSTALL_DIR) $(1)/usr/include - $(CP) $(PKG_INSTALL_DIR)/usr/include/yajl $(1)/usr/include/ - $(INSTALL_DIR) $(1)/usr/lib - $(CP) $(PKG_INSTALL_DIR)/usr/lib/libyajl.so* $(1)/usr/lib/ -endef - define Package/yajl/install $(INSTALL_DIR) $(1)/usr/lib $(CP) $(PKG_INSTALL_DIR)/usr/lib/libyajl.so* $(1)/usr/lib/ diff --git a/libs/zmq/Makefile b/libs/zmq/Makefile index 3ac17529b..a202d1a38 100644 --- a/libs/zmq/Makefile +++ b/libs/zmq/Makefile @@ -1,4 +1,4 @@ -# +# # Copyright (C) 2016 OpenWrt.org # # This is free software, licensed under the GNU General Public License v2. 
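On the redis 030-fix-uclibc-compilation.patch above: uClibc(-ng) builds usually do not provide execinfo.h / backtrace(), so the patch keeps HAVE_BACKTRACE undefined there (and pulls in features.h so __GLIBC__/__UCLIBC__ are visible before the test). A hypothetical sketch of the guard pattern this feeds, assuming a dump_stack() helper that is illustrative rather than actual Redis code:

<pre><code>
#include <stdio.h>
#ifdef HAVE_BACKTRACE
#include <execinfo.h>
#endif

/* Print a best-effort stack trace to stderr; compiles to a stub on libcs
 * without backtrace() support, e.g. typical uClibc configurations. */
static void dump_stack(void)
{
#ifdef HAVE_BACKTRACE
    void *frames[32];
    int depth = backtrace(frames, 32);
    backtrace_symbols_fd(frames, depth, 2);
#else
    fprintf(stderr, "stack traces not available on this C library\n");
#endif
}

int main(void)
{
    dump_stack();
    return 0;
}
</code></pre>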
@@ -11,25 +11,22 @@ include $(TOPDIR)/rules.mk PKG_NAME:=zeromq PKG_VERSION:=4.1.7 -PKG_RELEASE:=1 -PKG_MAINTAINER:=Dirk Chang <dirk@kooiot.com> -PKG_LICENSE:=GPL-3.0+ -PKG_LICENSE_FILES:=LICENCE.txt - -PKG_CPE_ID:=cpe:/a:zeromq:libzmq +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://github.com/zeromq/zeromq4-1/releases/download/v$(PKG_VERSION) PKG_HASH:=31c383cfcd3be1dc8a66e448c403029e793687e70473b89c4cc0bd626e7da299 - PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) -PKG_FIXUP:=autoreconf +PKG_MAINTAINER:=Dirk Chang <dirk@kooiot.com> +PKG_LICENSE:=GPL-3.0-or-later +PKG_LICENSE_FILES:=LICENCE.txt +PKG_CPE_ID:=cpe:/a:zeromq:libzmq PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 +PKG_REMOVE_FILES:=autogen.sh acinclude.m4 aclocal.m4 - -include $(INCLUDE_DIR)/uclibc++.mk include $(INCLUDE_DIR)/package.mk define Package/libzmq/default @@ -37,7 +34,7 @@ define Package/libzmq/default URL:=http://www.zeromq.org/ SECTION:=libs CATEGORY:=Libraries - DEPENDS:=+libuuid +libpthread +librt $(CXX_DEPENDS) + DEPENDS:=+libuuid +libpthread +librt +libstdcpp PROVIDES:=libzmq endef @@ -65,7 +62,10 @@ endef # add extra configure flags here CONFIGURE_ARGS += \ --enable-static \ - --enable-shared + --enable-shared \ + --with-pic \ + --with-relaxed \ + --without-documentation ifeq ($(BUILD_VARIANT),curve) CONFIGURE_ARGS+= --with-libsodium @@ -73,6 +73,8 @@ else CONFIGURE_ARGS+= --without-libsodium endif +TARGET_CXXFLAGS += -Wno-error=cpp + define Build/InstallDev $(INSTALL_DIR) $(1)/usr/include $(CP) $(PKG_INSTALL_DIR)/usr/include/zmq.h $(1)/usr/include diff --git a/libs/zmq/patches/010-disable_pedantic_on_linux_with_ulibc++.patch b/libs/zmq/patches/010-disable_pedantic_on_linux_with_ulibc++.patch deleted file mode 100644 index 391941fb5..000000000 --- a/libs/zmq/patches/010-disable_pedantic_on_linux_with_ulibc++.patch +++ /dev/null @@ -1,14 +0,0 @@ ---- a/configure.ac -+++ b/configure.ac -@@ -150,8 +150,10 @@ case "${host_os}" in - *linux*) - # Define on Linux to enable all library features. 
Define if using a gnu compiler - if test "x$GXX" = "xyes"; then -- CPPFLAGS="-D_GNU_SOURCE $CPPFLAGS" -+ CPPFLAGS="-D_GNU_SOURCE $CPPFLAGS -Wno-long-long" - fi -+ libzmq_pedantic="no" -+ libzmq_werror="no" - AC_DEFINE(ZMQ_HAVE_LINUX, 1, [Have Linux OS]) - libzmq_on_linux="yes" - diff --git a/libs/zmq/patches/020-map_with_const_string_with_ublic++.patch b/libs/zmq/patches/020-map_with_const_string_with_ublic++.patch index 8a47aa1ad..a00716060 100644 --- a/libs/zmq/patches/020-map_with_const_string_with_ublic++.patch +++ b/libs/zmq/patches/020-map_with_const_string_with_ublic++.patch @@ -12,16 +12,6 @@ metadata_t (const dict_t &dict); virtual ~metadata_t (); ---- a/src/socket_base.cpp -+++ b/src/socket_base.cpp -@@ -30,6 +30,7 @@ - #include <new> - #include <string> - #include <algorithm> -+#include <ctype.h> - - #include "platform.hpp" - --- a/src/stream_engine.cpp +++ b/src/stream_engine.cpp @@ -208,7 +208,11 @@ void zmq::stream_engine_t::plug (io_thread_t *io_thread_, @@ -36,7 +26,7 @@ zmq_assert (metadata == NULL); metadata = new (std::nothrow) metadata_t (properties); } -@@ -815,7 +815,11 @@ void zmq::stream_engine_t::mechanism_ready () +@@ -824,7 +828,11 @@ void zmq::stream_engine_t::mechanism_ready () // If we have a peer_address, add it to metadata if (!peer_address.empty()) { diff --git a/libs/zmq/patches/050-nanosleep.patch b/libs/zmq/patches/050-nanosleep.patch new file mode 100644 index 000000000..5e48c0753 --- /dev/null +++ b/libs/zmq/patches/050-nanosleep.patch @@ -0,0 +1,54 @@ +--- a/src/signaler.cpp ++++ b/src/signaler.cpp +@@ -86,7 +86,8 @@ static int sleep_ms (unsigned int ms_) + usleep (ms_ * 1000); + return 0; + #else +- return usleep (ms_ * 1000); ++ const struct timespec req = {0, (long int)ms_ * 1000 * 1000}; ++ return nanosleep (&req, NULL); + #endif + } + +--- a/src/tcp_address.cpp ++++ b/src/tcp_address.cpp +@@ -29,6 +29,7 @@ + + #include <string> + #include <sstream> ++#include <ctime> + + #include "tcp_address.hpp" + #include "platform.hpp" +@@ -194,7 +195,8 @@ int zmq::tcp_address_t::resolve_nic_name (const char *nic_, bool ipv6_, bool is_ + rc = getifaddrs (&ifa); + if (rc == 0 || (rc < 0 && errno != ECONNREFUSED)) + break; +- usleep ((backoff_msec << i) * 1000); ++ const struct timespec req = {0, (backoff_msec << i) * 1000 * 1000}; ++ nanosleep (&req, NULL); + } + errno_assert (rc == 0); + zmq_assert (ifa != NULL); +--- a/src/zmq.cpp ++++ b/src/zmq.cpp +@@ -692,7 +692,8 @@ int zmq_poll (zmq_pollitem_t *items_, int nitems_, long timeout_) + usleep (timeout_ * 1000); + return 0; + #else +- return usleep (timeout_ * 1000); ++ const struct timespec req = {0, timeout_ * 1000 * 1000}; ++ return nanosleep (&req, NULL); + #endif + } + +@@ -852,7 +853,8 @@ int zmq_poll (zmq_pollitem_t *items_, int nitems_, long timeout_) + Sleep (timeout_ > 0 ? timeout_ : INFINITE); + return 0; + #else +- return usleep (timeout_ * 1000); ++ const struct timespec req = {0, timeout_ * 1000 * 1000}; ++ return nanosleep (&req, NULL); + #endif + } + zmq::clock_t clock; diff --git a/mail/msmtp-scripts/Makefile b/mail/msmtp-scripts/Makefile index 3e38d4896..42ed208ed 100644 --- a/mail/msmtp-scripts/Makefile +++ b/mail/msmtp-scripts/Makefile @@ -1,7 +1,6 @@ # # Copyright (C) 2009 David Cooper <dave@kupesoft.com> -# Copyright (C) 2009-2015 OpenWrt.org -# Copyright (C) 2016 Daniel Dickinson <cshored@thecshore.com> +# Copyright (C) 2016-2019 Daniel Dickinson <cshored@thecshore.com> # # This is free software, licensed under the GNU General Public License v2. # See /LICENSE for more information. 
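One more note, on the zmq 050-nanosleep.patch above: usleep() was marked obsolescent in POSIX.1-2001 and removed in POSIX.1-2008, so with strict feature-test macros newer toolchains may no longer declare it; the patch therefore rebuilds the millisecond sleeps on nanosleep(). A minimal sketch of the same idea (illustrative only; it splits whole seconds from the remainder because tv_nsec must stay below 1,000,000,000):

<pre><code>
#include <time.h>

/* Sleep for ms milliseconds via nanosleep(); returns 0 on success or -1
 * (with errno set, e.g. EINTR) if the sleep was interrupted. */
static int sleep_ms(unsigned int ms)
{
    struct timespec req;
    req.tv_sec  = ms / 1000;                     /* whole seconds            */
    req.tv_nsec = (long)(ms % 1000) * 1000000L;  /* remainder in nanoseconds */
    return nanosleep(&req, NULL);
}

int main(void)
{
    return sleep_ms(250); /* usage: pause for a quarter of a second */
}
</code></pre>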
@@ -10,14 +9,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=msmtp-scripts -PKG_VERSION:=1.0.8 +PKG_VERSION:=1.2.4 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_SOURCE_URL:=@SF/msmtp-scripts -PKG_HASH:=2aec48d47b02facf2a33cf97a7434e969c1a054224406e6c55320d825c7902b2 +PKG_SOURCE_URL:=https://launchpad.net/$(PKG_NAME)/1.2/$(PKG_VERSION)/+download +PKG_HASH:=fc85ab8ed1348be584adfc1feb89f51daed7404e9e8643652ff31d2af00f1cf5 +PKG_MAINTAINER:=Daniel F. Dickinson <cshored@thecshore.com> -PKG_LICENSE:=GPL-3.0+ +PKG_LICENSE:=GPL-3.0-or-later PKG_LICENSE_FILES:=COPYING include $(INCLUDE_DIR)/package.mk @@ -25,24 +25,20 @@ include $(INCLUDE_DIR)/package.mk define Package/msmtp-scripts/Default SECTION:=mail CATEGORY:=Mail - TITLE:=DEPRECATED: Simple sendmail SMTP queueing and forwarding - URL:=http://msmtp-scripts.sourceforge.net/ + TITLE:=Forwarding only SMTP with queuing + URL:=https://msmtp-scripts.thecshore.com endef define Package/msmtp-scripts/Default/description - DEPRECATED: SourceForge project is abandonded; and upstream (on GitHub) - has deprecated this project. See: - https://github.com/cshore-history/msmtp-scripts#deprecation-notice - msmtp-scripts are scripts wrappers around the msmtp SMTP client that - add queueing, logging to syslog or file, a subset of sendmail/postfix + add queueing, logging to syslog or file, and a subset of sendmail/postfix mailq/postsuper/postqueue commands implemented in a compatible fashion. endef define Package/msmtpq-ng $(call Package/msmtp-scripts/Default) - DEPENDS+= @(PACKAGE_msmtp||PACKAGE_msmtp-nossl) - TITLE+= (msmtpq-ng wrappers) + DEPENDS+= +msmtp + TITLE+= (common) endef define Package/msmtpq-ng/conffiles @@ -60,10 +56,15 @@ define Package/msmtpq-ng-mta $(call Package/msmtp-scripts/Default) TITLE+= (as MTA) DEPENDS+=+msmtpq-ng - USERID:=msmtp=482:msmtp=482 + ALTERNATIVES:=\ + 400:/usr/sbin/sendmail:/usr/sbin/msmtpq-ng-mta \ + 400:/usr/lib/sendmail:/usr/sbin/msmtpq-ng-mta \ + 400:/usr/sbin/mailq:/usr/sbin/msmtpq-ng-queue-mta \ + 400:/usr/sbin/postqueue:/usr/sbin/msmtpq-ng-queue-mta \ + 400:/usr/sbin/postsuper:/usr/sbin/msmtpq-ng-queue-mta endef -define Package/msmtp-queue-mta/conffiles +define Package/msmtpq-ng-mta/conffiles /etc/msmtpq-ng-mta.rc endef @@ -78,7 +79,8 @@ endef define Package/msmtpq-ng-mta-smtpd $(call Package/msmtp-scripts/Default) DEPENDS+= +msmtpq-ng-mta +xinetd - TITLE+= (basic SMTP server) + TITLE+= (localhost SMTPd) + USERID:=msmtp=482:msmtp=482 endef define Package/msmtp-ng-mta-smtpd/description @@ -92,13 +94,13 @@ define Package/msmtp-ng-mta-smtpd/description the hold queue before it can be delivered. endef -define Package/msmtpq-ng-mta/postinst - mkdir -p $${IPKG_INSTROOT}/etc/crontabs - if ! grep -q msmtpq-ng-mta $${IPKG_INSTROOT}/etc/crontabs/root; then echo $$'\n'"*/60 * * * * /usr/bin/msmtpq-ng-mta -q" >>$${IPKG_INSTROOT}/etc/crontabs/root; fi +define Package/msmtpq-ng-mta-smtpd/conffiles +/etc/xinetd.d/ms-mta-smtpd endef -define Package/msmtp-queue-mta/prerm - if grep -q msmtpq-ng-mta $${IPKG_INSTROOT}/etc/crontabs/root; then grep -v '\*/60 \* \* \* \* /usr/bin/msmtpq-ng-mta -q' $${IPKG_INSTROOT}/etc/crontabs/root >$${IPKG_INSTROOT}/etc/crontabs/root.new; mv -f $${IPKG_INSTROOT}/etc/crontabs/root.new $${IPKG_INSTROOT}/etc/crontabs; fi +define Package/msmtpq-ng-mta/postinst + mkdir -p $${IPKG_INSTROOT}/etc/crontabs + if ! 
grep -q msmtpq-ng-mta $${IPKG_INSTROOT}/etc/crontabs/root 2>/dev/null; then echo $$'\n'"*/60 * * * * /usr/bin/msmtpq-ng-mta -q" >>$${IPKG_INSTROOT}/etc/crontabs/root; fi endef define Build/Configure @@ -111,31 +113,24 @@ endef define Package/msmtpq-ng/install $(INSTALL_DIR) $(1)/etc - $(INSTALL_CONF) ./files/msmtpq-ng.rc $(1)/etc/msmtpq-ng.rc + $(INSTALL_DATA) ./files/msmtpq-ng.rc $(1)/etc/msmtpq-ng.rc $(INSTALL_DIR) $(1)/usr/bin - $(CP) $(PKG_BUILD_DIR)/msmtpq-ng/msmtpq-ng $(1)/usr/bin/ - $(SED) 's/logger -i/logger/' $(1)/usr/bin/msmtpq-ng - $(CP) $(PKG_BUILD_DIR)/msmtpq-ng/msmtpq-ng-queue $(1)/usr/bin/ + $(CP) $(PKG_BUILD_DIR)/src/usr/bin/msmtpq-ng $(1)/usr/bin/ + $(CP) $(PKG_BUILD_DIR)/src/usr/bin/msmtpq-ng-queue $(1)/usr/bin/ endef define Package/msmtpq-ng-mta/install $(INSTALL_DIR) $(1)/usr/bin $(1)/usr/sbin $(1)/usr/lib $(1)/etc/init.d - $(INSTALL_CONF) $(PKG_BUILD_DIR)/msmtpq-ng-mta/msmtpq-ng-mta.rc $(1)/etc/ - echo 'MSMTP_LOCK_DIR=/var/lock/msmtp' >>$(1)/etc/msmtpq-ng-mta.rc - $(INSTALL_BIN) $(PKG_BUILD_DIR)/msmtpq-ng-mta/msmtpq-ng-mta $(1)/usr/bin/ - $(INSTALL_BIN) $(PKG_BUILD_DIR)/msmtpq-ng-mta/msmtpq-ng-queue-mta $(1)/usr/bin/ + $(INSTALL_DATA) ./files/msmtpq-ng-mta.rc $(1)/etc/ + $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/usr/sbin/msmtpq-ng-mta $(1)/usr/sbin/ + $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/usr/sbin//msmtpq-ng-queue-mta $(1)/usr/sbin/ $(INSTALL_DIR) $(1)/etc/crontabs $(INSTALL_BIN) ./files/msmtpq-ng-mta.init $(1)/etc/init.d/msmtpq-ng-mta - ln -sf ../bin/msmtpq-ng-mta $(1)/usr/sbin/sendmail - ln -sf ../bin/msmtpq-ng-mta $(1)/usr/lib/sendmail - ln -sf ../bin/msmtpq-ng-queue-mta $(1)/usr/sbin/mailq - ln -sf ../bin/msmtpq-ng-queue-mta $(1)/usr/sbin/postqueue - ln -sf ../bin/msmtpq-ng-queue-mta $(1)/usr/sbin/postsuper endef define Package/msmtpq-ng-mta-smtpd/install $(INSTALL_DIR) $(1)/etc/xinetd.d - $(INSTALL_BIN) $(PKG_BUILD_DIR)/msmtpq-ng-mta/sendmail-bs.xinetd $(1)/etc/xinetd.d/msmtpq-ng-mta-smtpd + $(INSTALL_BIN) $(PKG_BUILD_DIR)/src/etc/xinetd.d/ms-mta-smtpd $(1)/etc/xinetd.d/ms-mta-smtpd endef diff --git a/mail/msmtp-scripts/files/msmtpq-ng-mta.init b/mail/msmtp-scripts/files/msmtpq-ng-mta.init index 944164c20..3012a28cb 100644 --- a/mail/msmtp-scripts/files/msmtpq-ng-mta.init +++ b/mail/msmtp-scripts/files/msmtpq-ng-mta.init @@ -5,13 +5,11 @@ START=90 boot() { [ ! -d /var/spool/msmtp ] && { - mkdir -m 0770 -p /var/spool/msmtp - chown msmtp:msmtp /var/spool/msmtp + mkdir -m1777 -p /var/spool/msmtp } [ ! 
-d /var/lock/msmtp ] && { - mkdir -m 0770 -p /var/lock/msmtp - chown msmtp:msmtp /var/lock/msmtp + mkdir -m1777 -p /var/lock/msmtp } } diff --git a/mail/msmtp-scripts/files/msmtpq-ng-mta.rc b/mail/msmtp-scripts/files/msmtpq-ng-mta.rc new file mode 100644 index 000000000..f4637ff42 --- /dev/null +++ b/mail/msmtp-scripts/files/msmtpq-ng-mta.rc @@ -0,0 +1,22 @@ +#!/bin/sh + +#Q=/var/spool/msmtp/"$(id -un)" +#LOG=syslog +#MAXLOGLEVEL=7 +#MSMTP_LOCK_DIR=/var/lock/msmtp/"$(id -un)" +#MSMTP_UMASK=077 +#MSMTP_LOG_UMASK=007 +#MSMTP_QUEUE_QUIET=true +#MSMTP_IGNORE_NO_RECIPIENTS=true +#MSMTP_QUEUE_ONLY=false +#MSMTP_SEND_DELAY=0 +#MSMTP_MAXIMUM_QUEUE_LIFETIME=345600 # Four days +#MSMTPQ_NG=msmtpq-ng +#MSMTPQ_NG_QUEUE=msmtpq-ng-queue +#MSMTP_CONF=/etc/msmtprc +#EMAIL_CONN_TEST=p +EMAIL_CONN_TEST_PING=openwrt.org +#EMAIL_CONN_TEST_IP=8.8.8.8 +#EMAIL_CONN_TEST_SITE=www.debian.org +#MSMTP_HOLD_SMTP_MAIL=true +#MSMTP_HOLD_CLI_MAIL=false diff --git a/mail/msmtp-scripts/files/msmtpq-ng.rc b/mail/msmtp-scripts/files/msmtpq-ng.rc index 179002126..33d721c28 100644 --- a/mail/msmtp-scripts/files/msmtpq-ng.rc +++ b/mail/msmtp-scripts/files/msmtpq-ng.rc @@ -1,14 +1,17 @@ +#!/bin/sh + #Q=~/msmtp.queue #LOG=~/log/.msmtp.queue.log #MAXLOGLEVEL=7 -#MSMTP_LOCKDIR=/var/lock +#MSMTP_LOCK_DIR=~/.msmtp.lock EMAIL_CONN_TEST=p -EMAIL_CONN_TEST_SITE=www.lede-project.org +EMAIL_CONN_TEST_PING=openwrt.org #EMAIL_CONN_TEST_IP=8.8.8.8 +#EMAIL_CONN_TEST_SITE=www.debian.org #MSMTP_UMASK=077 #MSMTP_LOG_UMASK=077 #MSMTP_QUEUE_QUIET=false -#MSMTP_IGNORE_NO_RECIPIENTS=false +#MSMTP_IGNORE_NO_RECIPIENTS=true #MSMTP_QUEUE_ONLY=false #MSMTP_SEND_DELAY=0 #MSMTP_MAXIMUM_QUEUE_LIFETIME=345600 # Four days @@ -16,3 +19,5 @@ EMAIL_CONN_TEST_SITE=www.lede-project.org #MSMTPQ_NG_QUEUE=msmtpq-ng-queue #MSMTP_HOLD_SMTP_MAIL=true #MSMTP_HOLD_CLI_MAIL=false +#MSMTP_CONF=/etc/msmtprc +#LOCK_CMD=flock diff --git a/mail/msmtp/Makefile b/mail/msmtp/Makefile index 17b1ae17d..3595949a4 100644 --- a/mail/msmtp/Makefile +++ b/mail/msmtp/Makefile @@ -10,7 +10,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=msmtp PKG_VERSION:=1.8.5 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=https://marlam.de/msmtp/releases @@ -49,6 +49,7 @@ $(call Package/msmtp/Default) DEPENDS+= +libgnutls +ca-bundle TITLE+= (with SSL support) VARIANT:=ssl + DEFAULT_VARIANT:=1 endef define Package/msmtp/conffiles @@ -64,6 +65,7 @@ define Package/msmtp-nossl $(call Package/msmtp/Default) TITLE+= (without SSL support) VARIANT:=nossl + PROVIDES:=msmtp endef define Package/msmtp-nossl/description @@ -74,7 +76,10 @@ endef define Package/msmtp-mta $(call Package/msmtp/Default) TITLE+= (as MTA) - DEPENDS+=@(PACKAGE_msmtp||PACKAGE_msmtp-nossl) + DEPENDS+=+msmtp + ALTERNATIVES:=\ + 100:/usr/sbin/sendmail:/usr/bin/msmtp \ + 100:/usr/lib/sendmail:/usr/bin/msmtp endef define Package/msmtp-mta/description @@ -85,7 +90,7 @@ endef define Package/msmtp-queue $(call Package/msmtp/Default) - DEPENDS+= +bash @(PACKAGE_msmtp||PACKAGE_msmtp-nossl) + DEPENDS+= +bash +msmtp TITLE+= (queue scripts) endef @@ -119,8 +124,6 @@ endef define Package/msmtp-mta/install $(INSTALL_DIR) $(1)/usr/sbin $(1)/usr/lib - ln -sf ../bin/msmtp $(1)/usr/sbin/sendmail - ln -sf ../bin/msmtp $(1)/usr/lib/sendmail endef Package/msmtp-nossl/conffiles = $(Package/msmtp/conffiles) diff --git a/multimedia/graphicsmagick/Makefile b/multimedia/graphicsmagick/Makefile index 85ce60221..69b3e16c5 100644 --- a/multimedia/graphicsmagick/Makefile +++ b/multimedia/graphicsmagick/Makefile @@ -5,13 +5,13 @@ 
include $(TOPDIR)/rules.mk PKG_NAME:=graphicsmagick -PKG_VERSION:=1.3.32 +PKG_VERSION:=1.3.33 PKG_RELEASE:=1 PKG_BUILD_DIR:=$(BUILD_DIR)/GraphicsMagick-$(PKG_VERSION) PKG_SOURCE:=GraphicsMagick-$(PKG_VERSION).tar.bz2 PKG_SOURCE_URL:=@SF/graphicsmagick -PKG_HASH:=d1f70bc6d41de922199ce6b0a04af7b3492b2fc4a2be6ee24e0af4e15250db0a +PKG_HASH:=d18aaca2d79a10270d49ad1aaa01dce24752f7548880138d59874a78ac62e11f PKG_LICENSE:=MIT PKG_LICENSE_FILES:=Copyright.txt diff --git a/multimedia/imagemagick/Makefile b/multimedia/imagemagick/Makefile index fd2fc24bc..d61c9160b 100644 --- a/multimedia/imagemagick/Makefile +++ b/multimedia/imagemagick/Makefile @@ -7,13 +7,13 @@ include $(TOPDIR)/rules.mk PKG_NAME:=imagemagick PKG_VERSION:=7.0.8 -PKG_REVISION:=49 +PKG_REVISION:=59 PKG_RELEASE:=1 PKG_MAINTAINER:=Val Kulkov <val.kulkov@gmail.com> PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_REVISION).tar.gz PKG_SOURCE_URL:=http://github.com/ImageMagick/ImageMagick/archive/$(PKG_VERSION)-$(PKG_REVISION) -PKG_HASH:=53f7963bbe81520e799e9e178a13757890ed43bc9faf2e86fae1cf58aea28575 +PKG_HASH:=238ee17196fcb80bb58485910aaefc12d48f99e4043c2a28f06ff9588161c4e3 PKG_BUILD_DIR:=$(BUILD_DIR)/ImageMagick-$(PKG_VERSION)-$(PKG_REVISION) PKG_LICENSE:=Apache-2.0 diff --git a/multimedia/youtube-dl/Makefile b/multimedia/youtube-dl/Makefile index 92c85f258..813574e51 100644 --- a/multimedia/youtube-dl/Makefile +++ b/multimedia/youtube-dl/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=youtube-dl -PKG_VERSION:=2019.7.16 +PKG_VERSION:=2019.8.2 PKG_RELEASE:=1 PKG_SOURCE:=youtube_dl-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://files.pythonhosted.org/packages/source/y/youtube_dl/ -PKG_HASH:=867651dbfc0cefd19bfa0750ce97c43e8436b2de414675d3211615840d6833ca +PKG_HASH:=73b8528782f507dc506422557c940842e1e514ffaf0b010d82642cb2ceeefdf7 PKG_BUILD_DIR:=$(BUILD_DIR)/youtube_dl-$(PKG_VERSION) PKG_MAINTAINER:=Adrian Panella <ianchi74@outlook.com>, Josef Schlehofer <pepe.schlehofer@gmail.com> diff --git a/net/adblock/Makefile b/net/adblock/Makefile index 79ffea4a0..a195490d3 100644 --- a/net/adblock/Makefile +++ b/net/adblock/Makefile @@ -6,8 +6,8 @@ include $(TOPDIR)/rules.mk PKG_NAME:=adblock -PKG_VERSION:=3.6.5 -PKG_RELEASE:=2 +PKG_VERSION:=3.8.0 +PKG_RELEASE:=1 PKG_LICENSE:=GPL-3.0+ PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org> @@ -22,7 +22,7 @@ define Package/adblock endef define Package/adblock/description -Powerful adblock script to block ad/abuse domains via dnsmasq, unbound, named, kresd or dnscrypt-proxy. +Powerful adblock script to block ad/abuse domains via dnsmasq, unbound, named or kresd. The script supports many domain blacklist sites plus manual black- and whitelist overrides. Please see https://github.com/openwrt/packages/blob/master/net/adblock/files/README.md for further information. 
@@ -45,7 +45,7 @@ endef define Package/adblock/install $(INSTALL_DIR) $(1)/usr/bin - $(INSTALL_BIN) ./files/adblock.sh $(1)/usr/bin/ + $(INSTALL_BIN) ./files/adblock.sh $(1)/usr/bin $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/adblock.init $(1)/etc/init.d/adblock @@ -54,9 +54,10 @@ define Package/adblock/install $(INSTALL_CONF) ./files/adblock.conf $(1)/etc/config/adblock $(INSTALL_DIR) $(1)/etc/adblock - $(INSTALL_CONF) ./files/adblock.notify $(1)/etc/adblock/ - $(INSTALL_CONF) ./files/adblock.blacklist $(1)/etc/adblock/ - $(INSTALL_CONF) ./files/adblock.whitelist $(1)/etc/adblock/ + $(INSTALL_BIN) ./files/adblock.mail $(1)/etc/adblock + $(INSTALL_BIN) ./files/adblock.service $(1)/etc/adblock + $(INSTALL_CONF) ./files/adblock.blacklist $(1)/etc/adblock + $(INSTALL_CONF) ./files/adblock.whitelist $(1)/etc/adblock endef $(eval $(call BuildPackage,adblock)) diff --git a/net/adblock/files/README.md b/net/adblock/files/README.md index 36eeee412..1e26b015c 100644 --- a/net/adblock/files/README.md +++ b/net/adblock/files/README.md @@ -61,44 +61,43 @@ A lot of people already use adblocker plugins within their desktop browsers, but * => weekly updates, approx. 2.500 entries (enabled by default) * zero-conf like automatic installation & setup, usually no manual changes needed * simple but yet powerful adblock engine: adblock does not use error prone external iptables rulesets, http pixel server instances and things like that -* supports five different dns backends / blocklist formats: dnsmasq, unbound, named (bind), kresd and dnscrypt-proxy -* supports six different download utilities: uclient-fetch, wget, curl, aria2c, wget-nossl, busybox-wget -* Really fast downloads & list processing as they are handled in parallel as background jobs in a configurable 'Download Queue' -* provides 'http only' mode without installed ssl library for all non-SSL blocklist sources -* supports a wide range of router modes, even AP modes are supported +* support four different dns backends: dnsmasq, unbound, named (bind) and kresd +* support two different dns blocking variants: 'nxdomain' (default, supported by all backends), 'null' (supported only by dnsmasq) +* support six different download utilities: uclient-fetch, wget, curl, aria2c, wget-nossl, busybox-wget +* fast downloads & list processing as they are handled in parallel running background jobs (see 'Download Queue') +* provide 'http only' mode without installed ssl library for all non-SSL blocklist sources +* support a wide range of router modes, even AP modes are supported * full IPv4 and IPv6 support -* provides top level domain compression ('tld compression'), this feature removes thousands of needless host entries from the blocklist and lowers the memory footprint for the dns backend +* provide top level domain compression ('tld compression'), this feature removes thousands of needless host entries from the blocklist and lowers the memory footprint for the dns backend +* provide a 'DNS File Reset', where the final DNS blockfile will be purged after DNS backend loading to save storage space * blocklist source parsing by fast & flexible regex rulesets * overall duplicate removal in central blocklist 'adb_list.overall' -* additional whitelist for manual overrides, located by default in /etc/adblock/adblock.whitelist +* additional blacklist for manual overrides, located by default in /etc/adblock/adblock.blacklist or in LuCI +* additional whitelist for manual overrides, located by default in /etc/adblock/adblock.whitelist or in LuCI * quality checks 
during blocklist update to ensure a reliable dns backend service * minimal status & error logging to syslog, enable debug logging to receive more output * procd based init system support (start/stop/restart/reload/suspend/resume/query/status) * procd network interface trigger support or classic time based startup * keep the dns cache intact after adblock processing (currently supported by unbound, named and kresd) -* conditional dns backend restarts by old/new blocklist comparison with sha256sum (default) or md5sum * suspend & resume adblock actions temporarily without blocklist reloading * provide comprehensive runtime information via LuCI or via 'status' init command * provide a detailed DNS Query Report with dns related information about client requests, top (blocked) domains and more -* provide a query function to quickly identify blocked (sub-)domains, e.g. for whitelisting. This function is also able to search in adblock backups, to get back the set of blocking lists sources for a certain domain -* force dns requests to local resolver -* force overall sort / duplicate removal for low memory devices (handle with care!) -* automatic blocklist backup & restore, they will be used in case of download errors or during startup in backup mode -* 'backup mode' to re-use blocklist backups during startup, get fresh lists only via reload or restart action -* 'Jail' blocklist generation which builds an additional list (/tmp/adb_list.jail) to block access to all domains except those listed in the whitelist file. You can use this restrictive blocklist manually e.g. for guest wifi or kidsafe configurations -* send notification emails in case of a processing error or if the overall domain count is ≤ 0 +* provide a query function to quickly identify blocked (sub-)domains, e.g. for whitelisting. 
This function is also able to search in adblock backups and black-/whitelist, to get back the set of blocking lists sources for a certain domain +* option to force dns requests to the local resolver +* automatic blocklist backup & restore, these backups will be used in case of download errors and during startup +* send notification E-Mails in case of a processing error or if the overall domain count is ≤ 0 * add new adblock sources on your own, see example below -* strong LuCI support +* strong LuCI support for all options ## Prerequisites -* [OpenWrt](https://openwrt.org), tested with the stable release series (18.06) and with the latest snapshot +* [OpenWrt](https://openwrt.org), tested with the stable release series (19.07) and with the latest snapshot * a usual setup with an enabled dns backend at minimum - dump AP modes without a working dns backend are _not_ supported * a download utility: * to support all blocklist sources a full version (with ssl support) of 'wget', 'uclient-fetch' with one of the 'libustream-*' ssl libraries, 'aria2c' or 'curl' is required * for limited devices with real memory constraints, adblock provides also a 'http only' option and supports wget-nossl and uclient-fetch (without libustream-ssl) as well * for more configuration options see examples below -* email notification (optional): for email notification support you need to install and configure the additional 'msmtp' package -* DNS Query Report (optional): for this detailed report you need to install the additional package 'tcpdump' or 'tcpdump-mini' +* E-Mail notification (optional): for E-Mail notification support you need the additional 'msmtp' package +* DNS Query Report (optional): for this detailed report you need the additional package 'tcpdump' or 'tcpdump-mini' ## Installation & Usage * install 'adblock' (_opkg install adblock_) @@ -108,56 +107,59 @@ A lot of people already use adblocker plugins within their desktop browsers, but ## LuCI adblock companion package * it's strongly recommended to use the LuCI frontend to easily configure all powerful aspects of adblock * install 'luci-app-adblock' (_opkg install luci-app-adblock_) -* the application is located in LuCI under 'Services' menu +* the application is located in LuCI under the 'Services' menu ## Tweaks * **runtime information:** the adblock status is available via _/etc/init.d/adblock status_ (see example below) * **debug logging:** for script debugging please set the config option 'adb\_debug' to '1' and check the runtime output with _logread -e "adblock"_ -* **storage expansion:** to process and store all blocklist sources at once it might helpful to enlarge your temp directory with a swap partition => see [OpenWrt Wiki](https://wiki.openwrt.org/doc/uci/fstab) for further details -* **add white- / blacklist entries:** add domain white- or blacklist entries to always-allow or -deny certain (sub) domains, by default both lists are empty and located in _/etc/adblock_. Please add one domain per line - ip addresses, wildcards & regex are _not_ allowed (see example below) -* **backup & restore blocklists:** enable this feature, to restore automatically the latest compressed backup of your blocklists in case of any processing error (e.g. a single blocklist source is not available during update). 
Please use an (external) solid partition and _not_ your volatile router temp directory for this +* **storage expansion:** to process and store all blocklist sources at once it might be helpful to enlarge your temp directory with a swap partition => see [OpenWrt Wiki](https://openwrt.org/docs/guide-user/storage/fstab) for further details +* **add white- / blacklist entries:** add domain black- or whitelist entries to always-deny or -allow certain (sub) domains, by default both lists are empty and located in _/etc/adblock_. Please add one domain per line - ip addresses, wildcards & regex are _not_ allowed (see example below). You need to refresh your blocklists after changes to these static lists. * **download queue size:** for further download & list processing performance improvements you can raise the 'adb\_maxqueue' value, e.g. '8' or '16' should be safe * **scheduled list updates:** for a scheduled call of the adblock service add an appropriate crontab entry (see example below) * **change startup behaviour:** by default the startup will be triggered by the 'wan' procd interface trigger. Choose 'none' to disable automatic startups, 'timed' to use a classic timeout (default 30 sec.) or select another trigger interface * **suspend & resume adblocking:** to quickly switch the adblock service 'on' or 'off', simply use _/etc/init.d/adblock [suspend|resume]_ * **domain query:** to query the active blocklist for a certain domain, please use the LuCI frontend or run _/etc/init.d/adblock query `<DOMAIN>`_ (see example below) * **add new list sources:** you could add new blocklist sources on your own via uci config, all you need is a source url and an awk one-liner (see example below) -* **disable active dns probing in windows 10:** to prevent a yellow exclamation mark on your internet connection icon (which wrongly means connected, but no internet), please change the following registry key/value from "1" to "0" _HKLM\SYSTEM\CurrentControlSet\Services\NlaSvc\Parameters\Internet\EnableActiveProbing_ ## Further adblock config options * usually the pre-configured adblock setup works quite well and no manual overrides are needed * the following options apply to the 'global' config section: * adb\_enabled => main switch to enable/disable adblock service (default: '0', disabled) - * adb\_debug => enable/disable adblock debug output (default: '0', disabled) + * adb\_dns => select the dns backend for your environment: 'dnsmasq', 'unbound', 'named' or 'kresd' (default: 'dnsmasq') + * adb\_dnsvariant => select the blocking variant: 'nxdomain' (default, supported by all backends), 'null (IPv4)' and 'null (IPv4/IPv6)' both options are only supported by dnsmasq * adb\_fetchutil => name of the used download utility: 'uclient-fetch', 'wget', 'curl', 'aria2c', 'wget-nossl'. 
'busybox' (default: 'uclient-fetch') * adb\_fetchparm => special config options for the download utility (default: not set) - * adb\_dns => select the dns backend for your environment: 'dnsmasq', 'unbound', 'named', 'kresd' or 'dnscrypt-proxy' (default: 'dnsmasq') - * adb\_dnsdir => target directory for the generated blocklist 'adb_list.overall' (default: not set, use dns backend default) * adb\_trigger => set the startup trigger to a certain interface, to 'timed' or to 'none' (default: 'wan') - * the following options apply to the 'extra' config section: + * adb\_debug => enable/disable adblock debug output (default: '0', disabled) * adb\_nice => set the nice level of the adblock process and all sub-processes (int/default: '0', standard priority) - * adb\_triggerdelay => additional trigger delay in seconds before adblock processing begins (int/default: '2') * adb\_forcedns => force dns requests to local resolver (bool/default: '0', disabled) - * adb\_backup => create compressed blocklist backups, they will be used in case of download errors or during startup in backup mode (bool/default: '0', disabled) - * adb\_backupdir => target directory for adblock backups (default: not set) - * adb\_backup_mode => do not automatically update blocklists during startup, use backups instead (bool/default: '0', disabled) + * adb\_maxqueue => size of the download queue to handle downloads & list processing in parallel (int/default: '8') + * adb\_dnsfilereset => the final DNS blockfile will be purged after DNS backend loading to save storage space (bool/default: 'false', disabled) * adb\_report => enable the background tcpdump gathering process to provide a detailed DNS Query Report (bool/default: '0', disabled) * adb\_repdir => target directory for dns related report files generated by tcpdump (default: '/tmp') + * adb\_backupdir => target directory for adblock backups (default: '/tmp') + * adb\_mail => send notification E-Mails in case of a processing errors or if the overall domain count is ≤ 0 (bool/default: '0', disabled) + * adb\_mreceiver => receiver address for adblock notification E-Mails (default: not set) +* the following options could be added via "Additional Field" in LuCI and apply to the 'extra' config section as well: + * adb\_dnsdir => target directory for the generated blocklist 'adb_list.overall' (default: not set, use dns backend default) + * adb\_blacklist => full path to the static blacklist file (default: '/etc/adblock/adblock.blacklist') + * adb\_whitelist => full path to the static whitelist file (default: '/etc/adblock/adblock.whitelist') + * adb\_triggerdelay => additional trigger delay in seconds before adblock processing begins (int/default: '2') + * adb\_dnsflush => flush DNS cache after adblock processing, i.e. 
enable the old restart behavior (bool/default: '0', disabled) * adb\_repiface => reporting interface used by tcpdump, set to 'any' for multiple interfaces (default: 'br-lan') * adb\_replisten => space separated list of reporting port(s) used by tcpdump (default: '53') - * adb\_repchunksize => report chunk size used by tcpdump in MB (int/default: '1') * adb\_repchunkcnt => report chunk count used by tcpdump (default: '5') - * adb\_maxqueue => size of the download queue to handle downloads & list processing in parallel (int/default: '8') - * adb\_jail => builds an additional 'Jail' list (/tmp/adb_list.jail) to block access to all domains except those listed in the whitelist file (bool/default: '0', disabled) - * adb\_dnsflush => flush DNS cache after adblock processing, i.e. enable the old restart behavior (bool/default: '0', disabled) - * adb\_notify => send notification emails in case of a processing error or if the overall domain count is ≤ 0 (bool/default: '0', disabled) - * adb\_notifycnt => Raise minimum domain count email notification trigger (int/default: '0') + * adb\_repchunksize => report chunk size used by tcpdump in MB (int/default: '1') + * adb\_msender => sender address for adblock notification E-Mails (default: 'no-reply@adblock') + * adb\_mtopic => topic for adblock notification E-Mails (default: 'adblock notification') + * adb\_mprofile => mail profile used in 'msmtp' for adblock notification E-Mails (default: 'adb_notify') + * adb\_mcnt => raise the minimum domain count E-Mmail notification trigger (int/default: '0') ## Examples **change default dns backend to 'unbound':** -Adblock deposits the final blocklist 'adb_list.overall' in '/var/lib/unbound' where unbound can find them in its jail. +Adblock deposits the final blocklist 'adb_list.overall' in '/var/lib/unbound' where unbound can find them in its jail, no further configuration needed. To preserve the DNS cache after adblock processing you need to install 'unbound-control'. **change default dns backend to 'named' (bind):** @@ -183,25 +185,7 @@ and at the end of the file add: The knot-resolver (kresd) is only available on Turris Omnia devices. Adblock deposits the final blocklist 'adb_list.overall' in '/etc/kresd', no further configuration needed. -**change default dns backend to 'dnscrypt-proxy':** - -The required 'blacklist' option of dnscrypt-proxy is not enabled by default, because the package will be compiled without plugins support. -Take a custom OpenWrt build with plugins support to use this feature. Adblock deposits the final blocklist 'adb_list.overall' in '/tmp'. -To use the blocklist please modify '/etc/config/dnscrypt-proxy' per instance: -<pre><code> - list blacklist 'domains:/tmp/adb_list.overall' -</code></pre> - -**reference the jail block list manually in a 'kidsafe' dhcp config:** - -The additional 'Jail' blocklist (by default in /tmp/adb_list.jail) block access to all domains except those listed in the whitelist file. -<pre><code> -config dnsmasq 'kidsafe' - [...] - option serversfile '/tmp/adb_list.jail' -</code></pre> - -**enable email notification via msmtp:** +**enable E-Mail notification via msmtp:** To use the email notification you have to install & configure the package 'msmtp'. Modify the file '/etc/msmtprc': @@ -221,8 +205,7 @@ from dev.adblock@gmail.com user dev.adblock password xxx </code></pre> -Edit the file '/etc/adblock/adblock.notify' and change at least the 'mail_receiver'. -Finally make this file executable via 'chmod' and test it directly. 
If no more errors come up you can comment 'mail_debug', too. +Finally enable E-Mail support and add a valid E-Mail address in LuCI. **receive adblock runtime information:** @@ -230,12 +213,14 @@ Finally make this file executable via 'chmod' and test it directly. If no more e /etc/init.d/adblock status ::: adblock runtime information + adblock_status : enabled - + adblock_version : 3.6.0 - + overall_domains : 30267 (backup mode) + + adblock_version : 3.8.0 + + overall_domains : 48359 + fetch_utility : /bin/uclient-fetch (libustream-ssl) - + dns_backend : dnsmasq (/tmp) - + last_rundate : 19.12.2018 16:29:25 - + system_release : GL-AR750S, OpenWrt SNAPSHOT r8814-6835c13e5a + + dns_backend : dnsmasq, /tmp + + dns_variant : null (IPv4/IPv6), true + + backup_dir : /mnt/data/adblock + + last_rundate : 15.08.2019 08:43:16 + + system_release : GL.iNet GL-AR750S, OpenWrt SNAPSHOT r10720-ccb4b96b8a </code></pre> **receive adblock DNS Query Report information:** @@ -272,6 +257,7 @@ Finally make this file executable via 'chmod' and test it directly. If no more e + 2 ::: v10.events.data.microsoft.com + 2 ::: settings-win.data.microsoft.com + 2 ::: nexusrules.officeapps.live.com +[...] </code></pre> **cronjob for a regular block list update (/etc/crontabs/root):** @@ -309,23 +295,28 @@ This entry does not remove: www.adwhere.com </code></pre> -**query the active blocklist for a certain (sub-)domain, e.g. for whitelisting:** +**query the active blocklist, the backups and black-/whitelist for a certain (sub-)domain, e.g. for whitelisting:** The query function checks against the submitted (sub-)domain and recurses automatically to the upper top level domain. For every (sub-)domain it returns the first ten relevant results. <pre><code> /etc/init.d/adblock query google.com ::: -::: results for domain 'google.com' +::: results for domain 'google.com' in active blocklist ::: + + adservice.google.com + + adservice.google.com.au + + adservice.google.com.vn + + adservices.google.com + analytics.google.com + googleadapis.l.google.com + pagead.l.google.com + partnerad.l.google.com + ssl-google-analytics.l.google.com - + www-google-analytics.l.google.com + video-stats.video.google.com + + [...] + ::: -::: results for domain 'google.com' in backups +::: results for domain 'google.com' in backups and black-/whitelist ::: + adb_list.adguard.gz partnerad.l.google.com + adb_list.adguard.gz googleadapis.l.google.com @@ -335,9 +326,13 @@ The query function checks against the submitted (sub-)domain and recurses automa + adb_list.disconnect.gz partnerad.l.google.com + adb_list.disconnect.gz video-stats.video.google.com + adb_list.disconnect.gz [...] + + adb_list.whocares.gz video-stats.video.google.com + + adb_list.whocares.gz adservice.google.com + + adb_list.whocares.gz adservice.google.com.au + + adb_list.whocares.gz [...] + + adb_list.yoyo.gz adservice.google.com + adb_list.yoyo.gz analytics.google.com + adb_list.yoyo.gz pagead.l.google.com - + adb_list.yoyo.gz partnerad.l.google.com + adb_list.yoyo.gz [...] </code></pre> @@ -361,9 +356,5 @@ To add a really new source with different domain/host format you have to write a ## Support Please join the adblock discussion in this [forum thread](https://forum.openwrt.org/t/adblock-support-thread/507) or contact me by mail <dev@brenken.org> -## Removal -* stop all adblock related services with _/etc/init.d/adblock stop_ -* optional: remove the adblock package (_opkg remove adblock_) - Have fun! 
Dirk diff --git a/net/adblock/files/adblock.conf b/net/adblock/files/adblock.conf index 8b47627d4..fad665ba4 100644 --- a/net/adblock/files/adblock.conf +++ b/net/adblock/files/adblock.conf @@ -1,16 +1,17 @@ config adblock 'global' + option adb_basever '3.8' option adb_enabled '0' option adb_dns 'dnsmasq' + option adb_dnsvariant 'nxdomain' option adb_fetchutil 'uclient-fetch' option adb_trigger 'wan' config adblock 'extra' option adb_debug '0' option adb_forcedns '0' - option adb_backup '0' option adb_report '0' - option adb_maxqueue '8' + option adb_maxqueue '4' config source 'adaway' option adb_src 'https://adaway.org/hosts.txt' @@ -30,12 +31,6 @@ config source 'bitcoin' option adb_src_desc 'focus on malicious bitcoin mining sites, infrequent updates, approx. 80 entries' option enabled '0' -config source 'blacklist' - option adb_src '/etc/adblock/adblock.blacklist' - option adb_src_rset '/^([[:alnum:]_-]+\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}' - option adb_src_desc 'static local domain blacklist, always deny these domains' - option enabled '1' - config source 'disconnect' option adb_src 'https://s3.amazonaws.com/lists.disconnect.me/simple_malvertising.txt' option adb_src_rset '/^([[:alnum:]_-]+\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}' diff --git a/net/adblock/files/adblock.init b/net/adblock/files/adblock.init index 74cbf01bb..b5369230a 100755 --- a/net/adblock/files/adblock.init +++ b/net/adblock/files/adblock.init @@ -52,7 +52,6 @@ reload_service() stop_service() { rc_procd "${adb_script}" stop - rc_procd start_service } restart() @@ -63,13 +62,13 @@ restart() suspend() { [ -s "${adb_pidfile}" ] && return 1 - rc_procd "${adb_script}" suspend + rc_procd start_service suspend } resume() { [ -s "${adb_pidfile}" ] && return 1 - rc_procd "${adb_script}" resume + rc_procd start_service resume } query() @@ -91,17 +90,17 @@ status() rtfile="${rtfile:-"/tmp/adb_runtime.json"}" if [ -s "${rtfile}" ] then - printf "%s\n" "::: adblock runtime information" + printf "%s\\n" "::: adblock runtime information" json_load_file "${rtfile}" json_select data json_get_keys keylist for key in ${keylist} do json_get_var value "${key}" - printf " + %-15s : %s\n" "${key}" "${value}" + printf " + %-15s : %s\\n" "${key}" "${value}" done else - printf "%s\n" "::: no adblock runtime information available" + printf "%s\\n" "::: no adblock runtime information available" fi } diff --git a/net/adblock/files/adblock.mail b/net/adblock/files/adblock.mail new file mode 100755 index 000000000..3b4d69cb6 --- /dev/null +++ b/net/adblock/files/adblock.mail @@ -0,0 +1,71 @@ +#!/bin/sh +# +# send mail script for adblock notifications +# written by Dirk Brenken (dev@brenken.org) +# Please note: you have to manually install and configure the package 'msmtp' before using this script + +# This is free software, licensed under the GNU General Public License v3. +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +LC_ALL=C +PATH="/usr/sbin:/usr/bin:/sbin:/bin" + +if [ -r "/lib/functions.sh" ] +then + . 
"/lib/functions.sh" + adb_basever="$(uci_get adblock global adb_basever)" + adb_debug="$(uci_get adblock extra adb_debug "0")" + adb_msender="$(uci_get adblock extra adb_msender "no-reply@adblock")" + adb_mreceiver="$(uci_get adblock extra adb_mreceiver)" + adb_mtopic="$(uci_get adblock extra adb_mtopic "adblock notification")" + adb_mprofile="$(uci_get adblock extra adb_mprofile "adb_notify")" +fi +adb_mail="$(command -v msmtp)" +adb_rc=1 + +if [ "${adb_debug}" -eq 1 ] +then + debug="--debug" +fi + +# mail header & receiver check +# +if [ -z "${adb_mreceiver}" ] +then + logger -p "err" -t "adblock-${adb_basever} [${$}]" "please set the mail receiver with the 'adb_mreceiver' option" + exit ${adb_rc} +fi +adb_mhead="From: ${adb_msender}\\nTo: ${adb_mreceiver}\\nSubject: ${adb_mtopic}\\nReply-to: ${adb_msender}\\nMime-Version: 1.0\\nContent-Type: text/html\\nContent-Disposition: inline\\n\\n" + +# info preparation +# +sys_info="$(strings /etc/banner 2>/dev/null; ubus call system board | sed -e 's/\"release\": {//' | sed -e 's/^[ \t]*//' | sed -e 's/[{}\",]//g' | sed -e 's/[ ]/ \t/' | sed '/^$/d' 2>/dev/null)" +adb_info="$(/etc/init.d/adblock status 2>/dev/null)" +if [ -f "/var/log/messages" ] +then + log_info="$(awk '/adblock-/{NR=1;max=79;if(length($0)>max+1)while($0){if(NR==1){print substr($0,1,max),"↵"} else {print " ",substr($0,1,max)}{$0=substr($0,max+1);NR=NR+1}}else print}' /var/log/messages)" +else + log_info="$(logread -e "adblock-" | awk '{NR=1;max=79;if(length($0)>max+1)while($0){if(NR==1){print substr($0,1,max),"↵"} else {print " ",substr($0,1,max)}{$0=substr($0,max+1);NR=NR+1}}else print}')" +fi + +# mail body +# +adb_mtext="<html><body><pre style='display:block;font-family:monospace;font-size:1rem;padding:20;background-color:#f3eee5;white-space:pre'>" +adb_mtext="${adb_mtext}\\n<strong>++\\n++ System Information ++\\n++</strong>\\n${sys_info}" +adb_mtext="${adb_mtext}\\n\\n<strong>++\\n++ Adblock Information ++\\n++</strong>\\n${adb_info}" +adb_mtext="${adb_mtext}\\n\\n<strong>++\\n++ Logfile Information ++\\n++</strong>\\n${log_info}" +adb_mtext="${adb_mtext}</pre></body></html>" + +# send mail +# +if [ -x "${adb_mail}" ] +then + printf "%b" "${adb_mhead}${adb_mtext}" 2>/dev/null | "${adb_mail}" ${debug} -a "${adb_mprofile}" "${adb_mreceiver}" >/dev/null 2>&1 + adb_rc=${?} + logger -p "info" -t "adblock-${adb_basever} [${$}]" "mail sent to '${adb_mreceiver}' with rc '${adb_rc}'" +else + logger -p "err" -t "adblock-${adb_basever} [${$}]" "msmtp mail daemon not found" +fi + +exit ${adb_rc} diff --git a/net/adblock/files/adblock.notify b/net/adblock/files/adblock.notify deleted file mode 100644 index 54f0288d5..000000000 --- a/net/adblock/files/adblock.notify +++ /dev/null @@ -1,69 +0,0 @@ -#!/bin/sh -# -# adblock send mail script for msmtp -# written by Dirk Brenken (dev@brenken.org) -# Please note: you have to install and configure the package 'msmtp' before using this script. - -# This is free software, licensed under the GNU General Public License v3. -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. 
- -LC_ALL=C -PATH="/usr/sbin:/usr/bin:/sbin:/bin" -mail_ver="1.0.4" -mail_daemon="$(command -v msmtp)" -mail_profile="adb_notify" -#mail_debug="--debug" -mail_rc=1 - -# mail header & mail receiver check -# -mail_receiver="" -mail_sender="no-reply@adblock" -mail_topic="${HOSTNAME}: adblock notification" -mail_head="From: ${mail_sender}\nTo: ${mail_receiver}\nSubject: ${mail_topic}\nReply-to: ${mail_sender}\nMime-Version: 1.0\nContent-Type: text/html\nContent-Disposition: inline\n\n" - -if [ -z "${mail_receiver}" ] -then - logger -p "err" -t "adblock-notify-${mail_ver}[${$}]" "please supply/customize the 'mail_receiver' in '/etc/adblock/adblock.notify'" - exit ${mail_rc} -fi - -# mail daemon check -# -if [ ! -x "${mail_daemon}" ] -then - mail_daemon="$(command -v sendmail)" -fi - -# info preparation -# -sys_info="$(strings /etc/banner 2>/dev/null; ubus call system board | sed -e 's/\"release\": {//' | sed -e 's/^[ \t]*//' | sed -e 's/[{}\",]//g' | sed -e 's/[ ]/ \t/' | sed '/^$/d' 2>/dev/null)" -adb_info="$(/etc/init.d/adblock status 2>/dev/null)" -if [ -f "/var/log/messages" ] -then - log_info="$(awk '/adblock-/{NR=1;max=79;if(length($0)>max+1)while($0){if(NR==1){print substr($0,1,max),"↵"} else {print " ",substr($0,1,max)}{$0=substr($0,max+1);NR=NR+1}}else print}' /var/log/messages)" -else - log_info="$(logread -e "adblock-" | awk '{NR=1;max=79;if(length($0)>max+1)while($0){if(NR==1){print substr($0,1,max),"↵"} else {print " ",substr($0,1,max)}{$0=substr($0,max+1);NR=NR+1}}else print}')" -fi - -# mail body -# -mail_text="<html><body><pre style='display:block;font-family:monospace;font-size:1rem;padding:20;background-color:#f3eee5;white-space:pre'>" -mail_text="${mail_text}\n<strong>++\n++ System Information ++\n++</strong>\n${sys_info}" -mail_text="${mail_text}\n\n<strong>++\n++ Adblock Information ++\n++</strong>\n${adb_info}" -mail_text="${mail_text}\n\n<strong>++\n++ Logfile Information ++\n++</strong>\n${log_info}" -mail_text="${mail_text}</pre></body></html>" - -# send mail -# -if [ -x "${mail_daemon}" ] -then - printf "%b" "${mail_head}${mail_text}" 2>/dev/null | "${mail_daemon}" ${mail_debug} -a "${mail_profile}" "${mail_receiver}" >/dev/null 2>&1 - mail_rc=${?} - logger -p "info" -t "adblock-notify-${mail_ver}[${$}]" "mail sent to '${mail_receiver}' with rc '${mail_rc}'" -else - logger -p "err" -t "adblock-notify-${mail_ver}[${$}]" "msmtp mail daemon not found" -fi - -exit ${mail_rc} diff --git a/net/adblock/files/adblock.service b/net/adblock/files/adblock.service new file mode 100755 index 000000000..1265c139e --- /dev/null +++ b/net/adblock/files/adblock.service @@ -0,0 +1,27 @@ +#!/bin/sh +# ubus monitor to trace dns backend events and conditionally restart adblock +# written by Dirk Brenken (dev@brenken.org) + +# This is free software, licensed under the GNU General Public License v3. +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +LC_ALL=C +PATH="/usr/sbin:/usr/bin:/sbin:/bin" + +if [ -r "/lib/functions.sh" ] +then + . "/lib/functions.sh" + adb_basever="$(uci_get adblock global adb_basever)" + adb_dns="$(uci_get adblock global adb_dns)" +fi +adb_ubus="$(command -v ubus)" + +if [ -x "${adb_ubus}" ] && [ -n "${adb_dns}" ] +then + logger -p "info" -t "adblock-${adb_basever} [${$}]" "ubus/adblock service started" + "${adb_ubus}" -S -M r -m invoke monitor | \ + { grep -qF "\"method\":\"set\",\"data\":{\"name\":\"${adb_dns}\""; [ $? 
-eq 0 ] && /etc/init.d/adblock start; } +else + logger -p "err" -t "adblock-${adb_basever} [${$}]" "can't start ubus/adblock service" +fi diff --git a/net/adblock/files/adblock.sh b/net/adblock/files/adblock.sh index 24e58b28f..e4bba187a 100755 --- a/net/adblock/files/adblock.sh +++ b/net/adblock/files/adblock.sh @@ -10,30 +10,27 @@ # LC_ALL=C PATH="/usr/sbin:/usr/bin:/sbin:/bin" -adb_ver="3.6.5-2" +adb_ver="3.8.0" adb_sysver="unknown" adb_enabled=0 adb_debug=0 adb_forcedns=0 -adb_jail=0 -adb_maxqueue=8 -adb_notify=0 -adb_notifycnt=0 +adb_maxqueue=4 +adb_mail=0 +adb_mcnt=0 +adb_trigger="wan" adb_triggerdelay=0 -adb_backup=0 -adb_backup_mode=0 -adb_backupdir="/mnt" +adb_backupdir="/tmp" adb_fetchutil="uclient-fetch" adb_dns="dnsmasq" +adb_dnsvariant="nxdomain" adb_dnsprefix="adb_list" adb_dnsfile="${adb_dnsprefix}.overall" -adb_dnsjail="${adb_dnsprefix}.jail" +adb_dnsfilereset="false" adb_dnsflush=0 +adb_blacklist="/etc/adblock/adblock.blacklist" adb_whitelist="/etc/adblock/adblock.whitelist" adb_rtfile="/tmp/adb_runtime.json" -adb_hashutil="$(command -v sha256sum)" -adb_hashold="" -adb_hashnew="" adb_report=0 adb_repiface="br-lan" adb_replisten="53" @@ -45,10 +42,13 @@ adb_cnt="" adb_rc=0 adb_action="${1:-"start"}" adb_pidfile="/var/run/adblock.pid" +adb_ubusservice="/etc/adblock/adblock.service" +adb_mailservice="/etc/adblock/adblock.mail" +adb_sources="" # load adblock environment # -f_envload() +f_load() { local dns_up sys_call sys_desc sys_model cnt=0 @@ -57,18 +57,11 @@ f_envload() sys_call="$(ubus -S call system board 2>/dev/null)" if [ -n "${sys_call}" ] then - sys_desc="$(printf '%s' "${sys_call}" | jsonfilter -e '@.release.description')" - sys_model="$(printf '%s' "${sys_call}" | jsonfilter -e '@.model')" + sys_desc="$(printf "%s" "${sys_call}" | jsonfilter -e '@.release.description')" + sys_model="$(printf "%s" "${sys_call}" | jsonfilter -e '@.model')" adb_sysver="${sys_model}, ${sys_desc}" fi - # check hash utility - # - if [ ! 
-x "${adb_hashutil}" ] - then - adb_hashutil="$(command -v md5sum)" - fi - # parse 'global' and 'extra' section by callback # config_cb() @@ -108,71 +101,78 @@ f_envload() config_load adblock config_foreach parse_config source - # check dns backend + # version check + # + if [ -z "${adb_basever}" ] || [ "${adb_ver%.*}" != "${adb_basever}" ] + then + f_log "info" "your adblock config seems to be too old, please update your config with the '--force-maintainer' opkg option" + exit 0 + fi + + # set dns backend # case "${adb_dns}" in - dnsmasq) + "dnsmasq") adb_dnsinstance="${adb_dnsinstance:-"0"}" adb_dnsuser="${adb_dnsuser:-"dnsmasq"}" adb_dnsdir="${adb_dnsdir:-"/tmp"}" adb_dnsheader="" - adb_dnsdeny="awk '{print \"server=/\"\$0\"/\"}'" - adb_dnsallow="awk '{print \"server=/\"\$0\"/#\"}'" - adb_dnshalt="server=/#/" + if [ "${adb_dnsvariant}" = "nxdomain" ] + then + adb_dnsdeny="awk '{print \"server=/\"\$0\"/\"}'" + adb_dnsallow="awk '{print \"server=/\"\$0\"/#\"}'" + elif [ "${adb_dnsvariant}" = "null (IPv4)" ] + then + adb_dnsdeny="awk '{print \"0.0.0.0\\t\"\$0\"\"}'" + elif [ "${adb_dnsvariant}" = "null (IPv4/IPv6)" ] + then + adb_dnsdeny="awk '{print \"0.0.0.0\\t\"\$0\"\\n::\\t\"\$0\"\"}'" + fi + adb_dnsallow="" ;; - unbound) + "unbound") adb_dnsinstance="${adb_dnsinstance:-"0"}" adb_dnsuser="${adb_dnsuser:-"unbound"}" adb_dnsdir="${adb_dnsdir:-"/var/lib/unbound"}" adb_dnsheader="" - adb_dnsdeny="awk '{print \"local-zone: \042\"\$0\"\042 static\"}'" - adb_dnsallow="awk '{print \"local-zone: \042\"\$0\"\042 transparent\"}'" - adb_dnshalt="local-zone: \".\" static" + adb_dnsdeny="awk '{print \"local-zone: \\042\"\$0\"\\042 static\"}'" + adb_dnsallow="awk '{print \"local-zone: \\042\"\$0\"\\042 transparent\"}'" ;; - named) + "named") adb_dnsinstance="${adb_dnsinstance:-"0"}" adb_dnsuser="${adb_dnsuser:-"bind"}" adb_dnsdir="${adb_dnsdir:-"/var/lib/bind"}" adb_dnsheader="\$TTL 2h"$'\n'"@ IN SOA localhost. root.localhost. (1 6h 1h 1w 2h)"$'\n'" IN NS localhost." - adb_dnsdeny="awk '{print \"\"\$0\" CNAME .\n*.\"\$0\" CNAME .\"}'" - adb_dnsallow="awk '{print \"\"\$0\" CNAME rpz-passthru.\n*.\"\$0\" CNAME rpz-passthru.\"}'" - adb_dnshalt="* CNAME ." + adb_dnsdeny="awk '{print \"\"\$0\" CNAME .\\n*.\"\$0\" CNAME .\"}'" + adb_dnsallow="awk '{print \"\"\$0\" CNAME rpz-passthru.\\n*.\"\$0\" CNAME rpz-passthru.\"}'" ;; - kresd) + "kresd") adb_dnsinstance="${adb_dnsinstance:-"0"}" adb_dnsuser="${adb_dnsuser:-"root"}" adb_dnsdir="${adb_dnsdir:-"/etc/kresd"}" adb_dnsheader="\$TTL 2h"$'\n'"@ IN SOA localhost. root.localhost. (1 6h 1h 1w 2h)"$'\n'" IN NS localhost." - adb_dnsdeny="awk '{print \"\"\$0\" CNAME .\n*.\"\$0\" CNAME .\"}'" - adb_dnsallow="awk '{print \"\"\$0\" CNAME rpz-passthru.\n*.\"\$0\" CNAME rpz-passthru.\"}'" - adb_dnshalt="* CNAME ." 
- ;; - dnscrypt-proxy) - adb_dnsinstance="${adb_dnsinstance:-"0"}" - adb_dnsuser="${adb_dnsuser:-"nobody"}" - adb_dnsdir="${adb_dnsdir:-"/tmp"}" - adb_dnsheader="" - adb_dnsdeny="awk '{print \$0}'" - adb_dnsallow="" - adb_dnshalt="" + adb_dnsdeny="awk '{print \"\"\$0\" CNAME .\\n*.\"\$0\" CNAME .\"}'" + adb_dnsallow="awk '{print \"\"\$0\" CNAME rpz-passthru.\\n*.\"\$0\" CNAME rpz-passthru.\"}'" ;; esac - # check adblock status + # status check # - if [ ${adb_enabled} -eq 0 ] + if [ "${adb_enabled}" -eq 0 ] then f_extconf f_temp f_rmdns f_jsnup "disabled" - f_log "info" "adblock is currently disabled, please set adb_enabled to '1' to use this service" + f_log "info" "adblock is currently disabled, please set the config option 'adb_enabled' to '1' to use this service" exit 0 fi + # dns backend check + # if [ -d "${adb_dnsdir}" ] && [ ! -f "${adb_dnsdir}/${adb_dnsfile}" ] then - printf '%s\n' "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}" + printf "%s\\n" "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}" fi if [ "${adb_action}" = "start" ] && [ "${adb_trigger}" = "timed" ] @@ -180,7 +180,7 @@ f_envload() sleep ${adb_triggerdelay} fi - while [ ${cnt} -le 30 ] + while [ "${cnt}" -le 30 ] do dns_up="$(ubus -S call service list "{\"name\":\"${adb_dns}\"}" 2>/dev/null | jsonfilter -l1 -e "@[\"${adb_dns}\"].instances.*.running" 2>/dev/null)" if [ "${dns_up}" = "true" ] @@ -193,27 +193,29 @@ f_envload() if [ "${dns_up}" != "true" ] || [ -z "${adb_dns}" ] || [ ! -x "$(command -v ${adb_dns})" ] then - f_log "err" "'${adb_dns}' not running or not executable" + f_log "err" "'${adb_dns}' not running or executable" elif [ ! -d "${adb_dnsdir}" ] then f_log "err" "'${adb_dnsdir}' backend directory not found" fi } -# check environment +# check & set environment # -f_envcheck() +f_env() { local ssl_lib - # startup message - # f_log "info" "adblock instance started ::: action: ${adb_action}, priority: ${adb_nice:-"0"}, pid: ${$}" f_jsnup "running" + f_extconf - # check external uci config files + # check backup directory # - f_extconf + if [ ! -d "${adb_backupdir}" ] + then + f_log "err" "the backup directory '${adb_backupdir}' does not exist/is not mounted yet, please create the directory or raise the 'adb_triggerdelay' to defer the adblock start" + fi # check fetch utility # @@ -260,15 +262,18 @@ f_envcheck() # f_temp() { - if [ -z "${adb_tmpdir}" ] + if [ -d "/tmp" ] && [ -z "${adb_tmpdir}" ] then adb_tmpdir="$(mktemp -p /tmp -d)" - adb_tmpload="$(mktemp -p ${adb_tmpdir} -tu)" - adb_tmpfile="$(mktemp -p ${adb_tmpdir} -tu)" + adb_tmpload="$(mktemp -p "${adb_tmpdir}" -tu)" + adb_tmpfile="$(mktemp -p "${adb_tmpdir}" -tu)" + elif [ ! -d "/tmp" ] + then + f_log "err" "the temp directory '/tmp' does not exist/is not mounted yet, please create the directory or raise the 'adb_triggerdelay' to defer the adblock start" fi if [ ! 
-s "${adb_pidfile}" ] then - printf '%s' "${$}" > "${adb_pidfile}" + printf "%s" "${$}" > "${adb_pidfile}" fi } @@ -283,22 +288,16 @@ f_rmtemp() > "${adb_pidfile}" } -# remove dns related files and directories +# remove dns related files, services and directories # f_rmdns() { if [ -n "${adb_dns}" ] then - f_hash - printf '%s\n' "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}" - > "${adb_dnsdir}/.${adb_dnsfile}" + printf "%s\\n" "${adb_dnsheader}" > "${adb_dnsdir}"/"${adb_dnsfile}" > "${adb_rtfile}" - rm -f "${adb_backupdir}/${adb_dnsprefix}"*.gz - f_hash - if [ ${?} -eq 1 ] - then - f_dnsup - fi + rm -f "${adb_backupdir}"/"${adb_dnsprefix}"*".gz" + f_dnsup f_rmtemp fi f_log "debug" "f_rmdns ::: dns: ${adb_dns}, dns_dir: ${adb_dnsdir}, dns_prefix: ${adb_dnsprefix}, dns_file: ${adb_dnsfile}, rt_file: ${adb_rtfile}, backup_dir: ${adb_backupdir}" @@ -317,7 +316,7 @@ f_uci() then uci_commit "${config}" case "${config}" in - firewall) + "firewall") /etc/init.d/firewall reload >/dev/null 2>&1 ;; *) @@ -336,17 +335,23 @@ f_count() local mode="${1}" adb_cnt=0 - if [ -s "${adb_dnsdir}/${adb_dnsfile}" ] && ([ -z "${mode}" ] || [ "${mode}" = "final" ]) + if [ -s "${adb_dnsdir}/${adb_dnsfile}" ] && { [ -z "${mode}" ] || [ "${mode}" = "final" ]; } then adb_cnt="$(wc -l 2>/dev/null < "${adb_dnsdir}/${adb_dnsfile}")" - if [ -s "${adb_tmpdir}/tmp.add_whitelist" ] + if [ -s "${adb_tmpdir}/tmp.add.whitelist" ] then - adb_cnt="$(( ${adb_cnt} - $(wc -l 2>/dev/null < "${adb_tmpdir}/tmp.add_whitelist") ))" + adb_cnt="$((adb_cnt-$(wc -l 2>/dev/null < "${adb_tmpdir}/tmp.add.whitelist")))" fi - if [ "${adb_dns}" = "named" ] || [ "${adb_dns}" = "kresd" ] + if [ "${adb_dns}" = "named" ] || [ "${adb_dns}" = "kresd" ] || { [ "${adb_dns}" = "dnsmasq" ] && [ "${adb_dnsvariant}" = "null (IPv4/IPv6)" ]; } then - adb_cnt="$(( (${adb_cnt} - $(printf '%s' "${adb_dnsheader}" | grep -c "^")) / 2 ))" + adb_cnt="$(((adb_cnt-$(printf "%s" "${adb_dnsheader}" | grep -c "^"))/2))" fi + elif [ "${mode}" = "blacklist" ] && [ -s "${adb_tmpfile}.blacklist" ] + then + adb_cnt="$(wc -l 2>/dev/null < "${adb_tmpfile}.blacklist")" + elif [ "${mode}" = "whitelist" ] && [ -s "${adb_tmpdir}/tmp.raw.whitelist" ] + then + adb_cnt="$(wc -l 2>/dev/null < "${adb_tmpdir}/tmp.raw.whitelist")" elif [ -s "${adb_tmpfile}" ] then adb_cnt="$(wc -l 2>/dev/null < "${adb_tmpfile}")" @@ -360,31 +365,49 @@ f_extconf() local uci_config port port_list="53 853 5353" case "${adb_dns}" in - dnsmasq) + "dnsmasq") uci_config="dhcp" - if [ ${adb_enabled} -eq 1 ] && [ -n "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]")" ] && \ - [ -z "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] && \ - [ -z "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile | grep -Fo "${adb_dnsdir}/${adb_dnsjail}")" ] + if [ "${adb_dnsvariant}" = "nxdomain" ] then - uci_set dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile "${adb_dnsdir}/${adb_dnsfile}" - elif [ ${adb_enabled} -eq 0 ] && [ -n "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + if [ "${adb_enabled}" -eq 1 ] && [ -z "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + then + uci_set dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile "${adb_dnsdir}/${adb_dnsfile}" + if [ "${adb_enabled}" -eq 1 ] && [ -n "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" addnhosts | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + then + uci -q del_list 
dhcp.@dnsmasq[${adb_dnsinstance}].addnhosts="${adb_dnsdir}/${adb_dnsfile}" + fi + elif [ "${adb_enabled}" -eq 0 ] && [ -n "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + then + uci_remove dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile + fi + elif [ "${adb_dnsvariant% *}" = "null" ] then - uci_remove dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile + if [ "${adb_enabled}" -eq 1 ] && [ -z "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" addnhosts | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + then + uci -q add_list dhcp.@dnsmasq[${adb_dnsinstance}].addnhosts="${adb_dnsdir}/${adb_dnsfile}" + if [ "${adb_enabled}" -eq 1 ] && [ -n "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + then + uci_remove dhcp "@dnsmasq[${adb_dnsinstance}]" serversfile + fi + elif [ "${adb_enabled}" -eq 0 ] && [ -n "$(uci_get dhcp "@dnsmasq[${adb_dnsinstance}]" addnhosts | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + then + uci_remove dhcp "@dnsmasq[${adb_dnsinstance}]" addnhosts + fi fi ;; - kresd) + "kresd") uci_config="resolver" - if [ ${adb_enabled} -eq 1 ] && [ -z "$(uci_get resolver kresd rpz_file | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + if [ "${adb_enabled}" -eq 1 ] && [ -z "$(uci_get resolver kresd rpz_file | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] then uci -q add_list resolver.kresd.rpz_file="${adb_dnsdir}/${adb_dnsfile}" - elif [ ${adb_enabled} -eq 0 ] && [ -n "$(uci_get resolver kresd rpz_file | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] + elif [ "${adb_enabled}" -eq 0 ] && [ -n "$(uci_get resolver kresd rpz_file | grep -Fo "${adb_dnsdir}/${adb_dnsfile}")" ] then uci -q del_list resolver.kresd.rpz_file="${adb_dnsdir}/${adb_dnsfile}" fi - if [ ${adb_enabled} -eq 1 ] && [ ${adb_dnsflush} -eq 0 ] && [ "$(uci_get resolver kresd keep_cache)" != "1" ] + if [ "${adb_enabled}" -eq 1 ] && [ "${adb_dnsflush}" -eq 0 ] && [ "$(uci_get resolver kresd keep_cache)" != "1" ] then uci_set resolver kresd keep_cache "1" - elif [ ${adb_enabled} -eq 0 ] || ([ ${adb_dnsflush} -eq 1 ] && [ "$(uci_get resolver kresd keep_cache)" = "1" ]) + elif [ "${adb_enabled}" -eq 0 ] || { [ "${adb_dnsflush}" -eq 1 ] && [ "$(uci_get resolver kresd keep_cache)" = "1" ]; } then uci_set resolver kresd keep_cache "0" fi @@ -393,8 +416,8 @@ f_extconf() f_uci "${uci_config}" uci_config="firewall" - if [ ${adb_enabled} -eq 1 ] && [ ${adb_forcedns} -eq 1 ] && \ - [ -z "$(uci_get firewall adblock_dns_53)" ] && [ $(/etc/init.d/firewall enabled; printf '%u' ${?}) -eq 0 ] + if [ "${adb_enabled}" -eq 1 ] && [ "${adb_forcedns}" -eq 1 ] && \ + [ -z "$(uci_get firewall adblock_dns_53)" ] && [ "$(/etc/init.d/firewall enabled; printf "%u" ${?})" -eq 0 ] then for port in ${port_list} do @@ -406,7 +429,7 @@ f_extconf() uci_set firewall "adblock_dns_${port}" "dest_port" "${port}" uci_set firewall "adblock_dns_${port}" "target" "DNAT" done - elif [ -n "$(uci_get firewall adblock_dns_53)" ] && ([ ${adb_enabled} -eq 0 ] || [ ${adb_forcedns} -eq 0 ]) + elif [ -n "$(uci_get firewall adblock_dns_53)" ] && { [ "${adb_enabled}" -eq 0 ] || [ "${adb_forcedns}" -eq 0 ]; } then for port in ${port_list} do @@ -422,14 +445,20 @@ f_dnsup() { local dns_up cache_util cache_rc cnt=0 - if [ ${adb_dnsflush} -eq 0 ] && [ ${adb_enabled} -eq 1 ] && [ "${adb_rc}" -eq 0 ] + if [ "${adb_dnsflush}" -eq 0 ] && [ "${adb_enabled}" -eq 1 ] && [ "${adb_rc}" -eq 0 ] then case "${adb_dns}" in - dnsmasq) - killall -q -HUP "${adb_dns}" - cache_rc=${?} + "dnsmasq") + if [ 
"${adb_dnsvariant}" = "nxdomain" ] + then + killall -q -HUP "${adb_dns}" + cache_rc=${?} + elif [ "${adb_dnsvariant% *}" = "null" ] + then + "/etc/init.d/${adb_dns}" restart >/dev/null 2>&1 + fi ;; - unbound) + "unbound") cache_util="$(command -v unbound-control)" if [ -x "${cache_util}" ] && [ -d "${adb_tmpdir}" ] && [ -f "${adb_dnsdir}"/unbound.conf ] then @@ -437,12 +466,12 @@ f_dnsup() fi "/etc/init.d/${adb_dns}" restart >/dev/null 2>&1 ;; - kresd) + "kresd") cache_util="keep_cache" "/etc/init.d/${adb_dns}" restart >/dev/null 2>&1 cache_rc=${?} ;; - named) + "named") cache_util="$(command -v rndc)" if [ -x "${cache_util}" ] && [ -f /etc/bind/rndc.conf ] then @@ -452,30 +481,25 @@ f_dnsup() "/etc/init.d/${adb_dns}" restart >/dev/null 2>&1 fi ;; - *) - "/etc/init.d/${adb_dns}" restart >/dev/null 2>&1 - ;; esac - else - "/etc/init.d/${adb_dns}" restart >/dev/null 2>&1 fi adb_rc=1 - while [ ${cnt} -le 10 ] + while [ "${cnt}" -le 10 ] do dns_up="$(ubus -S call service list "{\"name\":\"${adb_dns}\"}" | jsonfilter -l1 -e "@[\"${adb_dns}\"].instances.*.running")" if [ "${dns_up}" = "true" ] then case "${adb_dns}" in - unbound) + "unbound") cache_util="$(command -v unbound-control)" if [ -x "${cache_util}" ] && [ -d "${adb_tmpdir}" ] && [ -s "${adb_tmpdir}"/adb_cache.dump ] then - while [ ${cnt} -le 10 ] + while [ "${cnt}" -le 10 ] do "${cache_util}" -c "${adb_dnsdir}"/unbound.conf load_cache < "${adb_tmpdir}"/adb_cache.dump >/dev/null 2>&1 cache_rc=${?} - if [ ${cache_rc} -eq 0 ] + if [ "${cache_rc}" -eq 0 ] then break fi @@ -485,6 +509,7 @@ f_dnsup() fi ;; esac + sleep 1 adb_rc=0 break fi @@ -499,35 +524,69 @@ f_dnsup() # f_list() { - local file mode="${1}" in_rc="${adb_rc}" + local file name tmp_file="${adb_tmpfile}" mode="${1}" in_rc="${adb_rc}" case "${mode}" in - backup) + "blacklist") + if [ -s "${adb_blacklist}" ] + then + src_name="${mode}" + adb_blacklist_rset="/^([[:alnum:]_-]+\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}" + awk "${adb_blacklist_rset}" "${adb_blacklist}" > "${adb_tmpfile}"."${src_name}" + fi + ;; + "whitelist") + if [ -s "${adb_whitelist}" ] + then + src_name="${mode}" + adb_whitelist_rset="/^([[:alnum:]_-]+\\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}" + awk "${adb_whitelist_rset}" "${adb_whitelist}" > "${adb_tmpdir}"/tmp.raw."${src_name}" + + adb_whitelist_rset="/^([[:alnum:]_-]+\\.)+[[:alpha:]]+([[:space:]]|$)/{gsub(\"\\\.\",\"\\\.\",\$1);print tolower(\"^\"\$1\"\\\|\\\.\"\$1)}" + awk "${adb_whitelist_rset}" "${adb_tmpdir}"/tmp.raw."${src_name}" > "${adb_tmpdir}"/tmp.rem."${src_name}" + + if [ -n "${adb_dnsallow}" ] + then + eval "${adb_dnsallow}" "${adb_tmpdir}"/tmp.raw."${src_name}" > "${adb_tmpdir}"/tmp.add."${src_name}" + fi + fi + ;; + "backup") if [ -d "${adb_backupdir}" ] then gzip -cf "${adb_tmpfile}" 2>/dev/null > "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" adb_rc=${?} fi ;; - restore) - if [ -d "${adb_backupdir}" ] && [ -f "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" ] + "restore") + if [ -d "${adb_backupdir}" ] then - gunzip -cf "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" 2>/dev/null > "${adb_tmpfile}" + if [ -n "${src_name}" ] && [ -f "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" ] + then + zcat "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" 2>/dev/null > "${adb_tmpfile}" + else + for file in "${adb_backupdir}/${adb_dnsprefix}."*".gz" + do + name="${file##*/}" + name="${name%.*}" + zcat "${file}" 2>/dev/null > "${adb_tmpfile}"."${name}" + done + fi adb_rc=${?} fi ;; - remove) + "remove") if [ -d 
"${adb_backupdir}" ] then rm -f "${adb_backupdir}/${adb_dnsprefix}.${src_name}.gz" fi adb_rc=${?} ;; - merge) + "merge") for file in "${adb_tmpfile}".* do cat "${file}" 2>/dev/null >> "${adb_tmpdir}/${adb_dnsfile}" - if [ ${?} -ne 0 ] + if [ "${?}" -ne 0 ] then adb_rc=${?} break @@ -536,24 +595,24 @@ f_list() done adb_tmpfile="${adb_tmpdir}/${adb_dnsfile}" ;; - final) + "final") > "${adb_dnsdir}/${adb_dnsfile}" - if [ -s "${adb_tmpdir}/tmp.add_whitelist" ] + if [ -s "${adb_tmpdir}/tmp.add.whitelist" ] then - cat "${adb_tmpdir}/tmp.add_whitelist" >> "${adb_dnsdir}/${adb_dnsfile}" + cat "${adb_tmpdir}/tmp.add.whitelist" >> "${adb_dnsdir}/${adb_dnsfile}" fi - if [ -s "${adb_tmpdir}/tmp.rem_whitelist" ] + if [ -s "${adb_tmpdir}/tmp.rem.whitelist" ] then - grep -vf "${adb_tmpdir}/tmp.rem_whitelist" "${adb_tmpdir}/${adb_dnsfile}" | eval "${adb_dnsdeny}" >> "${adb_dnsdir}/${adb_dnsfile}" + grep -vf "${adb_tmpdir}/tmp.rem.whitelist" "${adb_tmpdir}/${adb_dnsfile}" | eval "${adb_dnsdeny}" >> "${adb_dnsdir}/${adb_dnsfile}" else eval "${adb_dnsdeny}" "${adb_tmpdir}/${adb_dnsfile}" >> "${adb_dnsdir}/${adb_dnsfile}" fi - if [ ${?} -eq 0 ] && [ -n "${adb_dnsheader}" ] + if [ "${?}" -eq 0 ] && [ -n "${adb_dnsheader}" ] then - printf '%s\n' "${adb_dnsheader}" | cat - "${adb_dnsdir}/${adb_dnsfile}" > "${adb_tmpdir}/${adb_dnsfile}" + printf "%s\\n" "${adb_dnsheader}" | cat - "${adb_dnsdir}/${adb_dnsfile}" > "${adb_tmpdir}/${adb_dnsfile}" mv -f "${adb_tmpdir}/${adb_dnsfile}" "${adb_dnsdir}/${adb_dnsfile}" fi adb_rc=${?} @@ -567,24 +626,23 @@ f_list() # f_tld() { - local cnt cnt_srt cnt_tld source="${1}" temp_src="${1}.src.gz" temp_tld="${1}.tld" tld_ok="false" + local cnt cnt_srt cnt_tld source="${1}" temp_tld="${1}.tld" tld_ok="false" - gzip -cf "${source}" 2>/dev/null > "${temp_src}" - if [ ${?} -eq 0 ] - then - cnt="$(wc -l 2>/dev/null < "${source}")" + cnt="$(wc -l 2>/dev/null < "${source}")" + if [ "${adb_dns}" != "dnsmasq" ] && [ "${adb_dnsvariant% *}" != "null" ] + then awk 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' "${source}" > "${temp_tld}" - if [ ${?} -eq 0 ] + if [ "${?}" -eq 0 ] then sort -u "${temp_tld}" > "${source}" - if [ ${?} -eq 0 ] + if [ "${?}" -eq 0 ] then cnt_srt="$(wc -l 2>/dev/null < "${source}")" awk '{if(NR==1){tld=$NF};while(getline){if($NF!~tld"\\."){print tld;tld=$NF}}print tld}' "${source}" > "${temp_tld}" - if [ ${?} -eq 0 ] + if [ "${?}" -eq 0 ] then awk 'BEGIN{FS="."}{for(f=NF;f>1;f--)printf "%s.",$f;print $1}' "${temp_tld}" > "${source}" - if [ ${?} -eq 0 ] + if [ "${?}" -eq 0 ] then rm -f "${temp_src}" "${temp_tld}" cnt_tld="$(wc -l 2>/dev/null < "${source}")" @@ -593,90 +651,72 @@ f_tld() fi fi fi + else + sort -u "${source}" > "${temp_tld}" + if [ "${?}" -eq 0 ] + then + mv -f "${temp_tld}" "${source}" + cnt_srt="$(wc -l 2>/dev/null < "${source}")" + tld_ok="true" + fi fi - if [ "${tld_ok}" = "false" ] then + unset cnt_srt cnt_tld rm -f "${temp_tld}" - gunzip -cf "${temp_src}" 2>/dev/null > "${source}" - if [ ${?} -ne 0 ] - then - rm -f "${temp_src}" - > "${source}" - fi + f_list blacklist + f_list whitelist + f_list restore + f_list merge + f_list final + cnt="$(wc -l 2>/dev/null < "${adb_tmpdir}"/"${adb_dnsfile}")" fi f_log "debug" "f_tld ::: source: ${source}, cnt: ${cnt:-"-"}, cnt_srt: ${cnt_srt:-"-"}, cnt_tld: ${cnt_tld:-"-"}, tld_ok: ${tld_ok}" } -# blocklist hash compare -# -f_hash() -{ - local hash hash_rc=1 - - if [ -x "${adb_hashutil}" ] && [ -f "${adb_dnsdir}/${adb_dnsfile}" ] - then - hash="$(${adb_hashutil} "${adb_dnsdir}/${adb_dnsfile}" 
2>/dev/null | awk '{print $1}')" - if [ -z "${adb_hashold}" ] && [ -n "${hash}" ] - then - adb_hashold="${hash}" - elif [ -z "${adb_hashnew}" ] && [ -n "${hash}" ] - then - adb_hashnew="${hash}" - fi - if [ -n "${adb_hashold}" ] && [ -n "${adb_hashnew}" ] - then - if [ "${adb_hashold}" = "${adb_hashnew}" ] - then - hash_rc=0 - fi - adb_hashold="" - adb_hashnew="" - fi - fi - f_log "debug" "f_hash ::: hash_util: ${adb_hashutil}, hash: ${hash}, out_rc: ${hash_rc}" - return ${hash_rc} -} - # suspend/resume adblock processing # f_switch() { - local status cnt mode="${1}" + local status done="false" mode="${1}" json_load_file "${adb_rtfile}" >/dev/null 2>&1 - json_select "data" + json_select "data" >/dev/null 2>&1 json_get_var status "adblock_status" - json_get_var cnt "overall_domains" - + f_temp if [ "${mode}" = "suspend" ] && [ "${status}" = "enabled" ] then - if [ ${cnt%% *} -gt 0 ] && [ -s "${adb_dnsdir}/${adb_dnsfile}" ] + > "${adb_dnsdir}/${adb_dnsfile}" + if [ -n "${adb_dnsheader}" ] then - f_hash - cat "${adb_dnsdir}/${adb_dnsfile}" > "${adb_dnsdir}/.${adb_dnsfile}" - printf '%s\n' "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}" - f_hash + printf "%s\\n" "${adb_dnsheader}" > "${adb_dnsdir}/${adb_dnsfile}" fi + done="true" elif [ "${mode}" = "resume" ] && [ "${status}" = "paused" ] then - if [ ${cnt%% *} -gt 0 ] && [ -s "${adb_dnsdir}/.${adb_dnsfile}" ] - then - f_hash - cat "${adb_dnsdir}/.${adb_dnsfile}" > "${adb_dnsdir}/${adb_dnsfile}" - > "${adb_dnsdir}/.${adb_dnsfile}" - f_hash - fi + f_list blacklist + f_list whitelist + f_list restore + f_list merge + f_tld "${adb_tmpdir}"/"${adb_dnsfile}" + f_list final + done="true" fi - if [ ${?} -eq 1 ] + if [ "${done}" = "true" ] then - f_temp + if [ "${mode}" = "suspend" ] + then + f_bgserv "stop" + fi f_dnsup + if [ "${mode}" = "resume" ] + then + f_bgserv "start" + fi f_jsnup "${mode}" f_log "info" "${mode} adblock processing" - f_rmtemp - exit 0 fi + f_rmtemp } # query blocklist for certain (sub-)domains @@ -687,54 +727,65 @@ f_query() if [ -z "${domain}" ] || [ "${domain}" = "${tld}" ] then - printf '%s\n' "::: invalid domain input, please submit a single domain, e.g. 'doubleclick.net'" + printf "%s\\n" "::: invalid domain input, please submit a single domain, e.g. 
'doubleclick.net'" else case "${adb_dns}" in - dnsmasq) - prefix=".*[\/\.]" - suffix="(\/)" - field=2 + "dnsmasq") + if [ "${adb_dnsvariant}" = "nxdomain" ] + then + prefix=".*[\\/\\.]" + suffix="(\\/)" + field=2 + elif [ "${adb_dnsvariant% *}" = "null" ] + then + prefix=".*[\\t\\.]" + suffix="" + field=2 + fi ;; - unbound) - prefix=".*[\"\.]" + "unbound") + prefix=".*[\"\\.]" suffix="(static)" field=3 ;; - named) - prefix="[^\*].*[\.]" - suffix="( \.)" + "named") + prefix="[^\\*].*[\\.]" + suffix="( \\.)" field=1 ;; - kresd) - prefix="[^\*].*[\.]" - suffix="( \.)" - field=1 - ;; - dnscrypt-proxy) - prefix=".*[\.]" - suffix="" + "kresd") + prefix="[^\\*].*[\\.]" + suffix="( \\.)" field=1 ;; esac - while [ "${domain}" != "${tld}" ] - do - search="${domain//./\\.}" - search="${search//[+*~%\$&\"\']/}" - result="$(awk -F '/|\"| ' "/^(${search}|${prefix}+${search}.*${suffix}$)/{i++;{printf(\" + %s\n\",\$${field})};if(i>9){printf(\" + %s\n\",\"[...]\");exit}}" "${adb_dnsdir}/${adb_dnsfile}")" - printf '%s\n%s\n%s\n' ":::" "::: results for domain '${domain}'" ":::" - printf '%s\n' "${result:-" - no match"}" - domain="${tld}" - tld="${domain#*.}" - done - - if [ ${adb_backup} -eq 1 ] && [ -d "${adb_backupdir}" ] + if [ "${adb_dnsfilereset}" = "false" ] + then + while [ "${domain}" != "${tld}" ] + do + search="${domain//./\\.}" + search="${search//[+*~%\$&\"\']/}" + result="$(awk -F '/|\"|\t| ' "/^(${prefix}+${search}.*${suffix}$)/{i++;{printf(\" + %s\\n\",\$${field})};if(i>9){printf(\" + %s\\n\",\"[...]\");exit}}" "${adb_dnsdir}/${adb_dnsfile}")" + printf "%s\\n%s\\n%s\\n" ":::" "::: results for domain '${domain}' in active blocklist" ":::" + printf "%s\n\n" "${result:-" - no match"}" + domain="${tld}" + tld="${domain#*.}" + done + fi + if [ -d "${adb_backupdir}" ] then search="${1//./\\.}" search="${search//[+*~%\$&\"\']/}" - printf '%s\n%s\n%s\n' ":::" "::: results for domain '${1}' in backups" ":::" - for file in ${adb_backupdir}/${adb_dnsprefix}.*.gz + printf "%s\\n%s\\n%s\\n" ":::" "::: results for domain '${1}' in backups and black-/whitelist" ":::" + for file in "${adb_backupdir}"/"${adb_dnsprefix}".*.gz "${adb_blacklist}" "${adb_whitelist}" do - zcat "${file}" 2>/dev/null | awk -v f="${file##*/}" "/^($search|.*\.${search})/{i++;{printf(\" + %-30s%s\n\",f,\$1)};if(i>=3){printf(\" + %-30s%s\n\",f,\"[...]\");exit}}" + suffix="${file##*.}" + if [ "${suffix}" = "gz" ] + then + zcat "${file}" 2>/dev/null | awk -v f="${file##*/}" "/^($search|.*\\.${search})/{i++;{printf(\" + %-30s%s\\n\",f,\$1)};if(i>=3){printf(\" + %-30s%s\\n\",f,\"[...]\");exit}}" + else + cat "${file}" 2>/dev/null | awk -v f="${file##*/}" "/^($search|.*\\.${search})/{i++;{printf(\" + %-30s%s\\n\",f,\$1)};if(i>=3){printf(\" + %-30s%s\\n\",f,\"[...]\");exit}}" + fi done fi fi @@ -744,9 +795,9 @@ f_query() # f_jsnup() { - local run_time bg_pid status="${1:-"enabled"}" mode="normal mode" + local run_time bg_pid status="${1:-"enabled"}" - if [ ${adb_rc} -gt 0 ] + if [ "${adb_rc}" -gt 0 ] then status="error" run_time="$(/bin/date "+%d.%m.%Y %H:%M:%S")" @@ -763,14 +814,10 @@ f_jsnup() then status="" fi - if [ ${adb_backup_mode} -eq 1 ] - then - mode="backup mode" - fi json_load_file "${adb_rtfile}" >/dev/null 2>&1 json_select "data" >/dev/null 2>&1 - if [ ${?} -eq 0 ] + if [ "${?}" -eq 0 ] then if [ -z "${adb_fetchinfo}" ] then @@ -793,21 +840,23 @@ f_jsnup() json_add_object "data" json_add_string "adblock_status" "${status:-"enabled"}" json_add_string "adblock_version" "${adb_ver}" - json_add_string "overall_domains" "${adb_cnt:-0} 
(${mode})" + json_add_string "overall_domains" "${adb_cnt:-0}" json_add_string "fetch_utility" "${adb_fetchinfo:-"-"}" - json_add_string "dns_backend" "${adb_dns} (${adb_dnsdir})" + json_add_string "dns_backend" "${adb_dns}, ${adb_dnsdir}" + json_add_string "dns_variant" "${adb_dnsvariant}, ${adb_dnsfilereset:-"false"}" + json_add_string "backup_dir" "${adb_backupdir}" json_add_string "last_rundate" "${run_time:-"-"}" json_add_string "system_release" "${adb_sysver}" json_close_object json_dump > "${adb_rtfile}" - if [ ${adb_notify} -eq 1 ] && [ -x /etc/adblock/adblock.notify ] && \ - ([ "${status}" = "error" ] || ([ "${status}" = "enabled" ] && [ ${adb_cnt} -le ${adb_notifycnt} ])) + if [ ${adb_mail} -eq 1 ] && [ -x "${adb_mailservice}" ] && \ + { [ "${status}" = "error" ] || { [ "${status}" = "enabled" ] && [ "${adb_cnt}" -le "${adb_mcnt}" ]; } } then - (/etc/adblock/adblock.notify >/dev/null 2>&1)& + ("${adb_mailservice}" >/dev/null 2>&1)& bg_pid=${!} fi - f_log "debug" "f_jsnup ::: status: ${status:-"-"}, mode: ${mode}, cnt: ${adb_cnt}, notify: ${adb_notify}, notify_cnt: ${adb_notifycnt}, notify_pid: ${bg_pid:-"-"}" + f_log "debug" "f_jsnup ::: status: ${status:-"-"}, cnt: ${adb_cnt}, mail: ${adb_mail}, mail_service: ${adb_mailservice}, mail_cnt: ${adb_mcnt}, mail_pid: ${bg_pid:-"-"}" } # write to syslog @@ -816,7 +865,7 @@ f_log() { local class="${1}" log_msg="${2}" - if [ -n "${log_msg}" ] && ([ "${class}" != "debug" ] || [ ${adb_debug} -eq 1 ]) + if [ -n "${log_msg}" ] && { [ "${class}" != "debug" ] || [ ${adb_debug} -eq 1 ]; } then logger -p "${class}" -t "adblock-${adb_ver}[${$}]" "${log_msg}" if [ "${class}" = "err" ] @@ -829,6 +878,24 @@ f_log() fi } +# start ubus monitor service to trace dns backend events +# +f_bgserv() +{ + local bg_pid status="${1}" + + bg_pid="$(pgrep -f "^/bin/sh ${adb_ubusservice}|^/bin/ubus -S -M r -m invoke monitor|^grep -qF \"method\":\"set\",\"data\":\\{\"name\":\"${adb_dns}\"" | awk '{ORS=" "; print $1}')" + if [ -z "${bg_pid}" ] && [ "${status}" = "start" ] \ + && [ -x "${adb_ubusservice}" ] && [ "${adb_dnsfilereset}" = "true" ] + then + ( "${adb_ubusservice}" &) + elif [ -n "${bg_pid}" ] && [ "${status}" = "stop" ] + then + kill -HUP ${bg_pid} + fi + f_log "debug" "f_bgserv ::: status: ${status:-"-"}, bg_pid: ${bg_pid:-"-"}, dns_filereset: ${adb_dnsfilereset:-"-"}, ubus_service: ${adb_ubusservice:-"-"}" +} + # main function for blocklist processing # f_main() @@ -839,57 +906,25 @@ f_main() mem_free="$(awk '/^MemFree/ {print int($2/1000)}' "/proc/meminfo" 2>/dev/null)" tmp_load="${adb_tmpload}" tmp_file="${adb_tmpfile}" - > "${adb_dnsdir}/.${adb_dnsfile}" - > "${adb_tmpdir}/tmp.raw_whitelist" - > "${adb_tmpdir}/tmp.add_whitelist" - > "${adb_tmpdir}/tmp.rem_whitelist" - f_log "debug" "f_main ::: dns: ${adb_dns}, fetch_util: ${adb_fetchinfo}, backup: ${adb_backup}, backup_mode: ${adb_backup_mode}, dns_jail: ${adb_jail}, force_dns: ${adb_forcedns}, mem_total: ${mem_total:-0}, mem_free: ${mem_free:-0}, max_queue: ${adb_maxqueue}" - - # prepare whitelist entries - # - if [ -s "${adb_whitelist}" ] - then - adb_whitelist_rset="/^([[:alnum:]_-]+\.)+[[:alpha:]]+([[:space:]]|$)/{print tolower(\$1)}" - awk "${adb_whitelist_rset}" "${adb_whitelist}" > "${adb_tmpdir}/tmp.raw_whitelist" - f_tld "${adb_tmpdir}/tmp.raw_whitelist" - - adb_whitelist_rset="/^([[:alnum:]_-]+\.)+[[:alpha:]]+([[:space:]]|$)/{gsub(\"\\\.\",\"\\\.\",\$1);print tolower(\"^\"\$1\"\\\|\\\.\"\$1)}" - awk "${adb_whitelist_rset}" "${adb_tmpdir}/tmp.raw_whitelist" > 
"${adb_tmpdir}/tmp.rem_whitelist" - - if [ -n "${adb_dnsallow}" ] - then - eval "${adb_dnsallow}" "${adb_tmpdir}/tmp.raw_whitelist" > "${adb_tmpdir}/tmp.add_whitelist" - fi - fi - - # build 'dnsjail' list - # - if [ ${adb_jail} -eq 1 ] - then - cat "${adb_tmpdir}/tmp.add_whitelist" > "/tmp/${adb_dnsjail}" - printf '%s\n' "${adb_dnshalt}" >> "/tmp/${adb_dnsjail}" - if [ -n "${adb_dnsheader}" ] - then - printf '%s\n' "${adb_dnsheader}" | cat - "/tmp/${adb_dnsjail}" > "${adb_tmpdir}/tmp.dnsjail" - cat "${adb_tmpdir}/tmp.dnsjail" > "/tmp/${adb_dnsjail}" - fi - fi - + f_log "debug" "f_main ::: dns: ${adb_dns}, fetch_util: ${adb_fetchinfo}, force_dns: ${adb_forcedns}, mem_total: ${mem_total:-0}, mem_free: ${mem_free:-0}, max_queue: ${adb_maxqueue}" + # main loop # + f_list blacklist + f_list whitelist for src_name in ${adb_sources} do - enabled="$(eval printf '%s' \"\${enabled_${src_name}\}\")" - src_url="$(eval printf '%s' \"\${adb_src_${src_name}\}\")" - src_rset="$(eval printf '%s' \"\${adb_src_rset_${src_name}\}\")" - src_cat="$(eval printf '%s' \"\${adb_src_cat_${src_name}\}\")" - adb_tmpload="${tmp_load}.${src_name}" - adb_tmpfile="${tmp_file}.${src_name}" + enabled="$(eval printf "%s" \"\$\{enabled_${src_name}\}\")" + src_url="$(eval printf "%s" \"\$\{adb_src_${src_name}\}\")" + src_rset="$(eval printf "%s" \"\$\{adb_src_rset_${src_name}\}\")" + src_cat="$(eval printf "%s" \"\$\{adb_src_cat_${src_name}\}\")" + adb_tmpload="${tmp_load}"."${src_name}" + adb_tmpfile="${tmp_file}"."${src_name}" # basic pre-checks # f_log "debug" "f_main ::: name: ${src_name}, enabled: ${enabled}" - if [ "${enabled}" != "1" ] || [ -z "${src_url}" ] || [ -z "${src_rset}" ] + if [ "${enabled}" != "1" ] || [ -f "${src_url}" ] || [ -z "${src_url}" ] || [ -z "${src_rset}" ] then f_list remove continue @@ -897,10 +932,10 @@ f_main() # backup mode # - if [ ${adb_backup_mode} -eq 1 ] && [ "${adb_action}" = "start" ] && [ "${src_name}" != "blacklist" ] + if [ "${adb_action}" = "start" ] then f_list restore - if [ ${adb_rc} -eq 0 ] && [ -s "${adb_tmpfile}" ] + if [ "${adb_rc}" -eq 0 ] && [ -s "${adb_tmpfile}" ] then continue fi @@ -908,76 +943,47 @@ f_main() # download queue processing # - if [ "${src_name}" = "blacklist" ] - then - if [ -s "${src_url}" ] - then - ( - src_log="$(cat "${src_url}" > "${adb_tmpload}" 2>&1)" - adb_rc=${?} - if [ ${adb_rc} -eq 0 ] && [ -s "${adb_tmpload}" ] - then - awk "${src_rset}" "${adb_tmpload}" 2>/dev/null > "${adb_tmpfile}" - adb_rc=${?} - if [ ${adb_rc} -eq 0 ] && [ -s "${adb_tmpfile}" ] - then - rm -f "${adb_tmpload}" - f_list download - fi - else - src_log="$(printf '%s' "${src_log}" | awk '{ORS=" ";print $0}')" - f_log "debug" "f_main ::: name: ${src_name}, url: ${src_url}, rc: ${adb_rc}, log: ${src_log:-"-"}" - fi - ) & - else - continue - fi - elif [ -n "${src_cat}" ] + if [ -n "${src_cat}" ] then ( - src_arc="${adb_tmpdir}/${src_url##*/}" + src_arc="${adb_tmpdir}"/"${src_url##*/}" src_log="$("${adb_fetchutil}" ${adb_fetchparm} "${src_arc}" "${src_url}" 2>&1)" adb_rc=${?} - if [ ${adb_rc} -eq 0 ] && [ -s "${src_arc}" ] + if [ "${adb_rc}" -eq 0 ] && [ -s "${src_arc}" ] then list="$(tar -tzf "${src_arc}")" - suffix="$(eval printf '%s' \"\${adb_src_suffix_${src_name}:-\"domains\"\}\")" + suffix="$(eval printf "%s" \"\$\{adb_src_suffix_${src_name}:-\"domains\"\}\")" for cat in ${src_cat} do - entry="$(printf '%s' "${list}" | grep -E "[\^/]+${cat}/${suffix}")" + entry="$(printf "%s" "${list}" | grep -E "[\\^/]+${cat}/${suffix}")" if [ -n "${entry}" ] then tar -xOzf "${src_arc}" 
"${entry}" >> "${adb_tmpload}" adb_rc=${?} - if [ ${adb_rc} -ne 0 ] + if [ "${adb_rc}" -ne 0 ] then break fi fi done else - src_log="$(printf '%s' "${src_log}" | awk '{ORS=" ";print $0}')" + src_log="$(printf "%s" "${src_log}" | awk '{ORS=" ";print $0}')" f_log "debug" "f_main ::: name: ${src_name}, url: ${src_url}, rc: ${adb_rc}, log: ${src_log:-"-"}" fi - if [ ${adb_rc} -eq 0 ] && [ -s "${adb_tmpload}" ] + if [ "${adb_rc}" -eq 0 ] && [ -s "${adb_tmpload}" ] then rm -f "${src_arc}" awk "${src_rset}" "${adb_tmpload}" 2>/dev/null > "${adb_tmpfile}" adb_rc=${?} - if [ ${adb_rc} -eq 0 ] && [ -s "${adb_tmpfile}" ] + if [ "${adb_rc}" -eq 0 ] && [ -s "${adb_tmpfile}" ] then rm -f "${adb_tmpload}" f_list download - if [ ${adb_backup} -eq 1 ] - then - f_list backup - fi - elif [ ${adb_backup} -eq 1 ] - then + f_list backup + else f_list restore fi - elif [ ${adb_backup} -eq 1 ] - then + else f_list restore fi ) & @@ -989,34 +995,27 @@ f_main() then awk "${src_rset}" "${adb_tmpload}" 2>/dev/null > "${adb_tmpfile}" adb_rc=${?} - if [ ${adb_rc} -eq 0 ] && [ -s "${adb_tmpfile}" ] + if [ "${adb_rc}" -eq 0 ] && [ -s "${adb_tmpfile}" ] then rm -f "${adb_tmpload}" f_list download - if [ ${adb_backup} -eq 1 ] - then - f_list backup - fi - elif [ ${adb_backup} -eq 1 ] - then + f_list backup + else f_list restore fi else - src_log="$(printf '%s' "${src_log}" | awk '{ORS=" ";print $0}')" + src_log="$(printf "%s" "${src_log}" | awk '{ORS=" ";print $0}')" f_log "debug" "f_main ::: name: ${src_name}, url: ${src_url}, rc: ${adb_rc}, log: ${src_log:-"-"}" - if [ ${adb_backup} -eq 1 ] - then - f_list restore - fi + f_list restore fi ) & fi - hold=$(( cnt % adb_maxqueue )) - if [ ${hold} -eq 0 ] + hold=$((cnt%adb_maxqueue)) + if [ "${hold}" -eq 0 ] then wait fi - cnt=$(( cnt + 1 )) + cnt=$((cnt+1)) done # list merge @@ -1026,26 +1025,28 @@ f_main() adb_tmpfile="${tmp_file}" f_list merge - # overall sort and conditional dns restart + # overall sort and dns restart # - f_hash - if [ -s "${adb_tmpdir}/${adb_dnsfile}" ] + if [ -s "${adb_tmpdir}"/"${adb_dnsfile}" ] then - f_tld "${adb_tmpdir}/${adb_dnsfile}" + f_tld "${adb_tmpdir}"/"${adb_dnsfile}" f_list final else - > "${adb_dnsdir}/${adb_dnsfile}" - fi - chown "${adb_dnsuser}" "${adb_dnsdir}/${adb_dnsfile}" 2>/dev/null - f_hash - if [ ${?} -eq 1 ] - then - f_dnsup + > "${adb_dnsdir}"/"${adb_dnsfile}" fi + chown "${adb_dnsuser}" "${adb_dnsdir}"/"${adb_dnsfile}" 2>/dev/null + f_dnsup f_jsnup - if [ ${?} -eq 0 ] + if [ "${?}" -eq 0 ] then - f_log "info" "blocklist with overall ${adb_cnt} domains loaded successfully (${adb_sysver})" + if [ "${adb_dnsfilereset}" = "true" ] + then + f_bgserv "start" + > "${adb_dnsdir}"/"${adb_dnsfile}" + f_log "info" "blocklist with overall ${adb_cnt} domains loaded successfully and reset afterwards (${adb_sysver})" + else + f_log "info" "blocklist with overall ${adb_cnt} domains loaded successfully (${adb_sysver})" + fi else f_log "err" "dns backend restart with active blocklist failed" fi @@ -1057,20 +1058,20 @@ f_main() # f_report() { - local bg_pid total blocked percent rep_clients rep_domains rep_blocked index hold ports cnt=0 search="${1}" count="${2}" filter="${3}" print="${4}" + local bg_pid status total blocked percent rep_clients rep_domains rep_blocked index hold ports cnt=0 search="${1}" count="${2}" filter="${3}" print="${4}" - if [ ${adb_report} -eq 1 ] && [ ! -x "${adb_reputil}" ] + if [ "${adb_report}" -eq 1 ] && [ ! 
-x "${adb_reputil}" ] then f_log "info" "Please install the package 'tcpdump' or 'tcpdump-mini' to use the adblock reporting feature!" - elif [ ${adb_report} -eq 0 ] && [ "${adb_action}" = "report" ] + elif [ "${adb_report}" -eq 0 ] && [ "${adb_action}" = "report" ] then f_log "info" "Please enable the extra option 'adb_report' to use the adblock reporting feature!" fi if [ -x "${adb_reputil}" ] then - bg_pid="$(pgrep -f "^${adb_reputil}.*adb_report\.pcap$" | awk '{ORS=" "; print $1}')" - if [ ${adb_report} -eq 0 ] || ([ -n "${bg_pid}" ] && ([ "${adb_action}" = "stop" ] || [ "${adb_action}" = "restart" ])) + bg_pid="$(pgrep -f "^${adb_reputil}.*adb_report\\.pcap$" | awk '{ORS=" "; print $1}')" + if [ "${adb_report}" -eq 0 ] || { [ -n "${bg_pid}" ] && { [ "${adb_action}" = "stop" ] || [ "${adb_action}" = "restart" ]; } } then if [ -n "${bg_pid}" ] then @@ -1084,7 +1085,7 @@ f_report() fi fi - if [ -x "${adb_reputil}" ] && [ ${adb_report} -eq 1 ] + if [ -x "${adb_reputil}" ] && [ "${adb_report}" -eq 1 ] then if [ -z "${bg_pid}" ] && [ "${adb_action}" != "report" ] && [ "${adb_action}" != "stop" ] then @@ -1097,53 +1098,53 @@ f_report() ports="${ports} or port ${port}" fi done - ("${adb_reputil}" -nn -s0 -l -i ${adb_repiface} ${ports} -C${adb_repchunksize} -W${adb_repchunkcnt} -w "${adb_repdir}/adb_report.pcap" >/dev/null 2>&1 &) - bg_pid="$(pgrep -f "^${adb_reputil}.*adb_report\.pcap$" | awk '{ORS=" "; print $1}')" + ( "${adb_reputil}" -nn -s0 -l -i ${adb_repiface} ${ports} -C${adb_repchunksize} -W${adb_repchunkcnt} -w "${adb_repdir}"/adb_report.pcap >/dev/null 2>&1 & ) + bg_pid="$(pgrep -f "^${adb_reputil}.*adb_report\\.pcap$" | awk '{ORS=" "; print $1}')" fi if [ "${adb_action}" = "report" ] && [ "${filter}" = "false" ] then - > "${adb_repdir}/adb_report.raw" + > "${adb_repdir}"/adb_report.raw for file in "${adb_repdir}"/adb_report.pcap* do ( - "${adb_reputil}" -tttt -r $file 2>/dev/null | \ - awk -v cnt=${cnt} '!/\.lan\. /&&/ A[\? ]+|NXDomain/{a=$1;b=substr($2,0,8);c=$4;sub(/\.[0-9]+$/,"",c); \ - d=cnt $7;e=$(NF-1);sub(/[0-9]\/[0-9]\/[0-9]/,"NX",e);sub(/\.$/,"",e);sub(/([0-9]{1,3}\.){3}[0-9]{1,3}/,"OK",e);printf("%s\t%s\t%s\t%s\t%s\n", a,b,c,d,e)}' >> "${adb_repdir}/adb_report.raw" + "${adb_reputil}" -tttt -r "${file}" 2>/dev/null | \ + awk -v cnt=${cnt} '!/\.lan\. /&&/ A[\? 
]+|NXDomain|0\.0\.0\.0/{a=$1;b=substr($2,0,8);c=$4;sub(/\.[0-9]+$/,"",c); \ + d=cnt $7;sub(/\*$/,"",d);e=$(NF-1);sub(/[0-9]\/[0-9]\/[0-9]|0\.0\.0\.0/,"NX",e);sub(/\.$/,"",e);sub(/([0-9]{1,3}\.){3}[0-9]{1,3}/,"OK",e);printf("%s\t%s\t%s\t%s\t%s\n", a,b,c,d,e)}' >> "${adb_repdir}/adb_report.raw" )& - hold=$(( cnt % adb_maxqueue )) - if [ ${hold} -eq 0 ] + hold=$((cnt%adb_maxqueue)) + if [ "${hold}" -eq 0 ] then wait fi - cnt=$(( cnt + 1 )) + cnt=$((cnt+1)) done wait - if [ -s "${adb_repdir}/adb_report.raw" ] + if [ -s "${adb_repdir}"/adb_report.raw ] then awk '{printf("%s\t%s\t%s\t%s\t%s\t%s\n", $4,$5,$1,$2,$3,$4)}' "${adb_repdir}/adb_report.raw" | \ sort -ur | uniq -uf2 | awk '{currA=($6+0);currB=$6;currC=substr($6,length($6),1); \ if(reqA==currB){reqA=0;printf("%s\t%s\n",d,$2)}else if(currC=="+"){reqA=currA;d=$3"\t"$4"\t"$5"\t"$2}}' | sort -ur > "${adb_repdir}/adb_report" fi - if [ -s "${adb_repdir}/adb_report" ] + if [ -s "${adb_repdir}"/adb_report ] then - total="$(wc -l < ${adb_repdir}/adb_report)" - blocked="$(awk '{if($5=="NX")print $4}' ${adb_repdir}/adb_report | wc -l)" - percent="$(awk -v t=${total} -v b=${blocked} 'BEGIN{printf("%.2f %s\n",b/t*100, "%")}')" - rep_clients="$(awk '{print $3}' ${adb_repdir}/adb_report | sort | uniq -c | sort -r | awk '{ORS=" ";if(NR<=10) printf("%s_%s ",$1,$2)}')" - rep_domains="$(awk '{if($5!="NX")print $4}' ${adb_repdir}/adb_report | sort | uniq -c | sort -r | awk '{ORS=" ";if(NR<=10)printf("%s_%s ",$1,$2)}')" - rep_blocked="$(awk '{if($5=="NX")print $4}' ${adb_repdir}/adb_report | sort | uniq -c | sort -r | awk '{ORS=" ";if(NR<=10)printf("%s_%s ",$1,$2)}')" - - > "${adb_repdir}/adb_report.json" - json_load_file "${adb_repdir}/adb_report.json" >/dev/null 2>&1 + total="$(wc -l < "${adb_repdir}"/adb_report)" + blocked="$(awk '{if($5=="NX")print $4}' "${adb_repdir}"/adb_report | wc -l)" + percent="$(awk -v t="${total}" -v b="${blocked}" 'BEGIN{printf("%.2f %s\n",b/t*100, "%")}')" + rep_clients="$(awk '{print $3}' "${adb_repdir}"/adb_report | sort | uniq -c | sort -r | awk '{ORS=" ";if(NR<=10) printf("%s_%s ",$1,$2)}')" + rep_domains="$(awk '{if($5!="NX")print $4}' "${adb_repdir}"/adb_report | sort | uniq -c | sort -r | awk '{ORS=" ";if(NR<=10)printf("%s_%s ",$1,$2)}')" + rep_blocked="$(awk '{if($5=="NX")print $4}' "${adb_repdir}"/adb_report | sort | uniq -c | sort -r | awk '{ORS=" ";if(NR<=10)printf("%s_%s ",$1,$2)}')" + + > "${adb_repdir}"/adb_report.json + json_load_file "${adb_repdir}"/adb_report.json >/dev/null 2>&1 json_init json_add_object "data" - json_add_string "start_date" "$(awk 'END{printf("%s",$1)}' ${adb_repdir}/adb_report)" - json_add_string "start_time" "$(awk 'END{printf("%s",$2)}' ${adb_repdir}/adb_report)" - json_add_string "end_date" "$(awk 'NR==1{printf("%s",$1)}' ${adb_repdir}/adb_report)" - json_add_string "end_time" "$(awk 'NR==1{printf("%s",$2)}' ${adb_repdir}/adb_report)" + json_add_string "start_date" "$(awk 'END{printf("%s",$1)}' "${adb_repdir}"/adb_report)" + json_add_string "start_time" "$(awk 'END{printf("%s",$2)}' "${adb_repdir}"/adb_report)" + json_add_string "end_date" "$(awk 'NR==1{printf("%s",$1)}' "${adb_repdir}"/adb_report)" + json_add_string "end_time" "$(awk 'NR==1{printf("%s",$2)}' "${adb_repdir}"/adb_report)" json_add_string "total" "${total}" json_add_string "blocked" "${blocked}" json_add_string "percent" "${percent}" @@ -1175,29 +1176,29 @@ f_report() json_close_object done json_close_object - json_dump > "${adb_repdir}/adb_report.json" + json_dump > "${adb_repdir}"/adb_report.json fi - rm -f 
"${adb_repdir}/adb_report.raw" + rm -f "${adb_repdir}"/adb_report.raw fi - if [ -s "${adb_repdir}/adb_report" ] + if [ -s "${adb_repdir}"/adb_report ] then search="${search//./\\.}" search="${search//[+*~%\$&\"\' ]/}" - > "${adb_repdir}/adb_report.final" - awk "BEGIN{i=0}/(${search})/{i++;if(i<=${count}){printf \"%s\t%s\t%s\t%s\t%s\n\",\$1,\$2,\$3,\$4,\$5}}" "${adb_repdir}/adb_report" > "${adb_repdir}/adb_report.final" - if [ ! -s "${adb_repdir}/adb_report.final" ] + > "${adb_repdir}"/adb_report.final + awk "BEGIN{i=0}/(${search})/{i++;if(i<=${count}){printf \"%s\\t%s\\t%s\\t%s\\t%s\\n\",\$1,\$2,\$3,\$4,\$5}}" "${adb_repdir}"/adb_report > "${adb_repdir}"/adb_report.final + if [ ! -s "${adb_repdir}"/adb_report.final ] then - printf "%s\t%s\t%s\t%s\t%s\n" "-" "-" "-" "-" "-" > "${adb_repdir}/adb_report.final" + printf "%s\\t%s\\t%s\\t%s\\t%s\\n" "-" "-" "-" "-" "-" > "${adb_repdir}"/adb_report.final fi fi if [ "${print}" = "true" ] then - if [ -s "${adb_repdir}/adb_report.json" ] + if [ -s "${adb_repdir}"/adb_report.json ] then - printf "%s\n%s\n%s\n" ":::" "::: Adblock DNS-Query Report" ":::" - json_load_file "${adb_repdir}/adb_report.json" + printf "%s\\n%s\\n%s\\n" ":::" "::: Adblock DNS-Query Report" ":::" + json_load_file "${adb_repdir}"/adb_report.json json_select "data" json_get_keys keylist for key in ${keylist} @@ -1205,55 +1206,55 @@ f_report() json_get_var value "${key}" eval "${key}=\"${value}\"" done - printf " + %s\n + %s\n" "Start ::: ${start_date}, ${start_time}" "End ::: ${end_date}, ${end_time}" - printf " + %s\n + %s %s\n" "Total ::: ${total}" "Blocked ::: ${blocked}" "(${percent})" + printf " + %s\\n + %s\\n" "Start ::: ${start_date}, ${start_time}" "End ::: ${end_date}, ${end_time}" + printf " + %s\\n + %s %s\\n" "Total ::: ${total}" "Blocked ::: ${blocked}" "(${percent})" json_select ".." - if json_get_type Status "top_clients" && [ "${Status}" = "array" ] + if json_get_type status "top_clients" && [ "${status}" = "array" ] then - printf "%s\n%s\n%s\n" ":::" "::: Top 10 Clients" ":::" + printf "%s\\n%s\\n%s\\n" ":::" "::: Top 10 Clients" ":::" json_select "top_clients" index=1 - while json_get_type Status ${index} && [ "${Status}" = "object" ] + while json_get_type status "${index}" && [ "${status}" = "object" ] do - json_get_values client ${index} - printf " + %-9s::: %s\n" ${client} - index=$((index + 1)) + json_get_values client "${index}" + printf " + %-9s::: %s\\n" ${client} + index=$((index+1)) done fi json_select ".." - if json_get_type Status "top_domains" && [ "${Status}" = "array" ] + if json_get_type status "top_domains" && [ "${status}" = "array" ] then - printf "%s\n%s\n%s\n" ":::" "::: Top 10 Domains" ":::" + printf "%s\\n%s\\n%s\\n" ":::" "::: Top 10 Domains" ":::" json_select "top_domains" index=1 - while json_get_type Status ${index} && [ "${Status}" = "object" ] + while json_get_type status "${index}" && [ "${status}" = "object" ] do - json_get_values domain ${index} - printf " + %-9s::: %s\n" ${domain} - index=$((index + 1)) + json_get_values domain "${index}" + printf " + %-9s::: %s\\n" ${domain} + index=$((index+1)) done fi json_select ".." 
- if json_get_type Status "top_blocked" && [ "${Status}" = "array" ] + if json_get_type status "top_blocked" && [ "${status}" = "array" ] then - printf "%s\n%s\n%s\n" ":::" "::: Top 10 Blocked Domains" ":::" + printf "%s\\n%s\\n%s\\n" ":::" "::: Top 10 Blocked Domains" ":::" json_select "top_blocked" index=1 - while json_get_type Status ${index} && [ "${Status}" = "object" ] + while json_get_type status "${index}" && [ "${status}" = "object" ] do - json_get_values blocked ${index} - printf " + %-9s::: %s\n" ${blocked} - index=$((index + 1)) + json_get_values blocked "${index}" + printf " + %-9s::: %s\\n" ${blocked} + index=$((index+1)) done fi - if [ -s "${adb_repdir}/adb_report.final" ] + if [ -s "${adb_repdir}"/adb_report.final ] then - printf "%s\n%s\n%s\n" ":::" "::: Latest DNS Queries" ":::" - printf "%-15s%-15s%-45s%-50s%s\n" "Date" "Time" "Client" "Domain" "Answer" - awk '{printf "%-15s%-15s%-45s%-50s%s\n",$1,$2,$3,$4,$5}' "${adb_repdir}/adb_report.final" + printf "%s\\n%s\\n%s\\n" ":::" "::: Latest DNS Queries" ":::" + printf "%-15s%-15s%-45s%-50s%s\\n" "Date" "Time" "Client" "Domain" "Answer" + awk '{printf "%-15s%-15s%-45s%-50s%s\n",$1,$2,$3,$4,$5}' "${adb_repdir}"/adb_report.final fi else - printf "%s\n%s\n%s\n" ":::" "::: no reporting data available yet" ":::" + printf "%s\\n%s\\n%s\\n" ":::" "::: no reporting data available yet" ":::" fi fi fi @@ -1272,16 +1273,18 @@ fi # handle different adblock actions # -f_envload +f_load case "${adb_action}" in stop) + f_bgserv "stop" f_report "+" "50" "false" "false" f_rmdns ;; restart) + f_bgserv "stop" f_report "+" "50" "false" "false" f_rmdns - f_envcheck + f_env f_main ;; suspend) @@ -1297,8 +1300,9 @@ case "${adb_action}" in f_query "${2}" ;; start|reload) + f_bgserv "stop" f_report "+" "50" "false" "false" - f_envcheck + f_env f_main ;; esac diff --git a/net/banip/Makefile b/net/banip/Makefile index fbc23ef29..1ad420738 100644 --- a/net/banip/Makefile +++ b/net/banip/Makefile @@ -6,7 +6,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=banip -PKG_VERSION:=0.1.4 +PKG_VERSION:=0.1.5 PKG_RELEASE:=1 PKG_LICENSE:=GPL-3.0+ PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org> diff --git a/net/banip/files/README.md b/net/banip/files/README.md index 1df1f7cdc..dfa6c7d7e 100644 --- a/net/banip/files/README.md +++ b/net/banip/files/README.md @@ -14,8 +14,8 @@ IP address blocking is commonly used to protect against brute force attacks, pre * supports blocking by ASN numbers * supports blocking by iso country codes * supports local white & blacklist (IPv4, IPv6 & CIDR notation), located by default in /etc/banip/banip.whitelist and /etc/banip/banip.blacklist -* auto-add unsuccessful ssh login attempts to local blacklist -* auto-add the uplink subnet to local whitelist +* auto-add unsuccessful ssh login attempts to local blacklist (see 'ban_autoblacklist' option) +* auto-add the uplink subnet to local whitelist (see 'ban_autowhitelist' option) * per source configuration of SRC (incoming) and DST (outgoing) * integrated IPSet-Lookup * integrated RIPE-Lookup @@ -54,7 +54,7 @@ IP address blocking is commonly used to protect against brute force attacks, pre * ban\_iface => space separated list of WAN network interface(s)/device(s) used by banIP (default: automatically set by banIP ('ban_automatic')) * the following options apply to the 'extra' config section: - * ban\_debug => enable/disable banIP debug output (default: '0', disabled) + * ban\_debug => enable/disable banIP debug output (bool/default: '0', disabled) * ban\_nice => set the nice level of the banIP 
process and all sub-processes (int/default: '0', standard priority) * ban\_triggerdelay => additional trigger delay in seconds before banIP processing begins (int/default: '2') * ban\_backup => create compressed blocklist backups, they will be used in case of download errors or during startup in 'backup mode' (bool/default: '0', disabled) @@ -62,6 +62,8 @@ IP address blocking is commonly used to protect against brute force attacks, pre * ban\_backupboot => do not automatically update blocklists during startup, use their backups instead (bool/default: '0', disabled) * ban\_maxqueue => size of the download queue to handle downloads & IPSet processing in parallel (int/default: '8') * ban\_fetchparm => special config options for the download utility (default: not set) + * ban\_autoblacklist => store auto-addons temporary in ipset and permanently in local blacklist as well (bool/default: '1', enabled) + * ban\_autowhitelist => store auto-addons temporary in ipset and permanently in local whitelist as well (bool/default: '1', enabled) ## Examples **receive banIP runtime information:** diff --git a/net/banip/files/banip.conf b/net/banip/files/banip.conf index 4843fc2b0..08053a2fe 100644 --- a/net/banip/files/banip.conf +++ b/net/banip/files/banip.conf @@ -1,5 +1,3 @@ -# banIP configuration, for further information -# see 'https://github.com/openwrt/packages/blob/master/net/banip/files/README.md' config banip 'global' option ban_enabled '0' @@ -92,14 +90,6 @@ config source 'yoyo' option ban_src_ruletype 'src' option ban_src_on '0' -config source 'zeus' - option ban_src 'https://zeustracker.abuse.ch/blocklist.php?download=ipblocklist' - option ban_src_desc 'Zeus Tracker by abuse.ch (IPv4)' - option ban_src_rset '/^(([0-9]{1,3}\.){3}[0-9]{1,3})([[:space:]]|$)/{print \"add zeus \"\$1}' - option ban_src_settype 'ip' - option ban_src_ruletype 'src' - option ban_src_on '0' - config source 'sslbl' option ban_src 'https://sslbl.abuse.ch/blacklist/sslipblacklist.csv' option ban_src_desc 'SSL Blacklist by abuse.ch (IPv4)' diff --git a/net/banip/files/banip.init b/net/banip/files/banip.init index 9356c4df0..235bbfd9c 100755 --- a/net/banip/files/banip.init +++ b/net/banip/files/banip.init @@ -50,7 +50,6 @@ reload_service() stop_service() { rc_procd "${ban_script}" stop - rc_procd start_service } status() diff --git a/net/banip/files/banip.sh b/net/banip/files/banip.sh index 8d1ae1e2a..c546e8724 100755 --- a/net/banip/files/banip.sh +++ b/net/banip/files/banip.sh @@ -10,7 +10,7 @@ # LC_ALL=C PATH="/usr/sbin:/usr/bin:/sbin:/bin" -ban_ver="0.1.4" +ban_ver="0.1.5" ban_sysver="unknown" ban_enabled=0 ban_automatic="1" @@ -21,6 +21,8 @@ ban_backup=0 ban_backupboot=0 ban_backupdir="/mnt" ban_maxqueue=4 +ban_autoblacklist=1 +ban_autowhitelist=1 ban_fetchutil="uclient-fetch" ban_ip="$(command -v ip)" ban_ipt="$(command -v iptables)" @@ -623,7 +625,11 @@ f_main() if [ -z "$(grep -F "${ip}" "${src_url}")" ] then printf '%s\n' "${ip}" >> "${tmp_load}" - printf '%s\n' "${ip}" >> "${src_url}" + if { [ "${src_name//_*/}" = "blacklist" ] && [ "${ban_autoblacklist}" -eq 1 ]; } || \ + { [ "${src_name//_*/}" = "whitelist" ] && [ "${ban_autowhitelist}" -eq 1 ]; } + then + printf '%s\n' "${ip}" >> "${src_url}" + fi fi done elif [ -n "${src_cat}" ] diff --git a/net/clamav/Makefile b/net/clamav/Makefile index bc399827d..85631725a 100644 --- a/net/clamav/Makefile +++ b/net/clamav/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=clamav PKG_VERSION:=0.101.2 -PKG_RELEASE:=2 +PKG_RELEASE:=3 
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.clamav.net/downloads/production/ @@ -23,6 +23,7 @@ PKG_CPE_ID:=cpe:/a:clamav:clamav PKG_BUILD_PARALLEL:=1 PKG_INSTALL:=1 +PKG_FIXUP:=autoreconf include $(INCLUDE_DIR)/uclibc++.mk include $(INCLUDE_DIR)/package.mk @@ -74,7 +75,6 @@ CONFIGURE_ARGS += \ --with-openssl="$(STAGING_DIR)/usr/" \ --with-pcre="$(STAGING_DIR)/usr/" \ --with-zlib="$(STAGING_DIR)/usr/" \ - --without-xml \ --without-iconv \ --without-libncurses-prefix @@ -93,18 +93,14 @@ define Package/clamav/install $(INSTALL_DIR) $(1)/usr/sbin $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/sbin/clamd $(1)/usr/sbin/ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/sbin/clamav-milter $(1)/usr/sbin/ - $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/clamav-config $(1)/usr/sbin/ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/clambc $(1)/usr/sbin/ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/clamconf $(1)/usr/sbin/ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/clamdscan $(1)/usr/sbin/ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/clamscan $(1)/usr/sbin/ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/sigtool $(1)/usr/sbin/ - $(INSTALL_DIR) $(1)/usr/include - $(CP) $(PKG_INSTALL_DIR)/usr/include/clamav.h $(1)/usr/include/ - $(INSTALL_DIR) $(1)/usr/lib - $(CP) $(PKG_INSTALL_DIR)/usr/lib*/* $(1)/usr/lib/ + $(CP) $(PKG_INSTALL_DIR)/usr/lib*/lib*.so.* $(1)/usr/lib/ $(INSTALL_DIR) $(1)/etc/config $(INSTALL_CONF) ./files/clamav.config $(1)/etc/config/clamav diff --git a/net/clamav/patches/002-Avoid-libxml-checks-if-disable-xml-is-used.patch b/net/clamav/patches/002-Avoid-libxml-checks-if-disable-xml-is-used.patch new file mode 100644 index 000000000..3564c31f9 --- /dev/null +++ b/net/clamav/patches/002-Avoid-libxml-checks-if-disable-xml-is-used.patch @@ -0,0 +1,167 @@ +From de943f313fa5c17bf9cbd560a7578796991b24b5 Mon Sep 17 00:00:00 2001 +From: Eneas U de Queiroz <cotequeiroz@gmail.com> +Date: Sat, 10 Aug 2019 19:43:20 -0300 +Subject: [PATCH] Avoid libxml checks if --disable-xml is used + +Signed-off-by: Eneas U de Queiroz <cotequeiroz@gmail.com> +--- + m4/reorganization/libs/xml.m4 | 126 +++++++++++++++++----------------- + 1 file changed, 62 insertions(+), 64 deletions(-) + +diff --git a/m4/reorganization/libs/xml.m4 b/m4/reorganization/libs/xml.m4 +index 77b2c13a8..0709d2914 100644 +--- a/m4/reorganization/libs/xml.m4 ++++ b/m4/reorganization/libs/xml.m4 +@@ -12,87 +12,85 @@ if test "X$want_xml" != "Xno"; then + @<:@default=/usr/local or /usr if not found in /usr/local@:>@]), + [with_xml_val=$withval] + ) +-fi +- +-AS_IF([test "x$with_xml_val" = "xno"], [XML_HOME=""], +- [test "x$with_xml_val" = "xyes"], [XML_HOME="/usr/local"], +- [XML_HOME="$with_xml_val"]) + +-AS_IF([test "x$XML_HOME" != "x"], [ +- AS_IF([test ! -x "$XML_HOME/bin/xml2-config"], [XML_HOME=""]) +- ]) ++ AS_IF([test "x$with_xml_val" = "xno"], [XML_HOME=""], ++ [test "x$with_xml_val" = "xyes"], [XML_HOME="/usr/local"], ++ [XML_HOME="$with_xml_val"]) + +-AS_IF([test "x$XML_HOME" = "x" -a "x$with_xml_val" = "xyes"], [ +- AS_IF([test -x "/usr/bin/xml2-config"], [XML_HOME="/usr"]) +- ]) ++ AS_IF([test "x$XML_HOME" != "x"], [ ++ AS_IF([test ! 
-x "$XML_HOME/bin/xml2-config"], [XML_HOME=""]) ++ ]) + +-if test "x$XML_HOME" != "x"; then +- AC_MSG_RESULT([$XML_HOME]) +-else +- AC_MSG_RESULT([not found]) +-fi ++ AS_IF([test "x$XML_HOME" = "x" -a "x$with_xml_val" = "xyes"], [ ++ AS_IF([test -x "/usr/bin/xml2-config"], [XML_HOME="/usr"]) ++ ]) + +-found_xml="no" +-XMLCONF_VERSION="" +-XML_CPPFLAGS="" +-XML_LIBS="" +-if test "x$XML_HOME" != "x"; then +- AC_MSG_CHECKING([xml2-config version]) +- XMLCONF_VERSION="`$XML_HOME/bin/xml2-config --version`" +- if test "x%XMLCONF_VERSION" != "x"; then +- AC_MSG_RESULT([$XMLCONF_VERSION]) +- found_xml="yes" +- XML_CPPFLAGS="`$XML_HOME/bin/xml2-config --cflags`" +- XML_LIBS="`$XML_HOME/bin/xml2-config --libs`" +- AS_ECHO("$XML_CPPFLAGS") +- AS_ECHO("$XML_LIBS") ++ if test "x$XML_HOME" != "x"; then ++ AC_MSG_RESULT([$XML_HOME]) + else +- AC_MSG_ERROR([xml2-config failed]) ++ AC_MSG_RESULT([not found]) ++ fi ++ ++ found_xml="no" ++ XMLCONF_VERSION="" ++ XML_CPPFLAGS="" ++ XML_LIBS="" ++ if test "x$XML_HOME" != "x"; then ++ AC_MSG_CHECKING([xml2-config version]) ++ XMLCONF_VERSION="`$XML_HOME/bin/xml2-config --version`" ++ if test "x%XMLCONF_VERSION" != "x"; then ++ AC_MSG_RESULT([$XMLCONF_VERSION]) ++ found_xml="yes" ++ XML_CPPFLAGS="`$XML_HOME/bin/xml2-config --cflags`" ++ XML_LIBS="`$XML_HOME/bin/xml2-config --libs`" ++ AS_ECHO("$XML_CPPFLAGS") ++ AS_ECHO("$XML_LIBS") ++ else ++ AC_MSG_ERROR([xml2-config failed]) ++ fi + fi +-fi + +-working_xml="no" +-if test "X$found_xml" != "Xno"; then +- AC_MSG_CHECKING([for xmlreader.h in $XML_HOME]) ++ working_xml="no" ++ if test "X$found_xml" != "Xno"; then ++ AC_MSG_CHECKING([for xmlreader.h in $XML_HOME]) + +- if test ! -f "$XML_HOME/include/libxml2/libxml/xmlreader.h"; then +- AC_MSG_RESULT([not found]) +- else +- AC_MSG_RESULT([found]) +- save_LIBS="$LIBS" +- save_CPPFLAGS="$CPPFLAGS" +- CPPFLAGS="$CPPFLAGS $XML_CPPFLAGS" +- save_LDFLAGS="$LDFLAGS" +- LDFLAGS="$LDFLAGS $XML_LIBS" ++ if test ! 
-f "$XML_HOME/include/libxml2/libxml/xmlreader.h"; then ++ AC_MSG_RESULT([not found]) ++ else ++ AC_MSG_RESULT([found]) ++ save_LIBS="$LIBS" ++ save_CPPFLAGS="$CPPFLAGS" ++ CPPFLAGS="$CPPFLAGS $XML_CPPFLAGS" ++ save_LDFLAGS="$LDFLAGS" ++ LDFLAGS="$LDFLAGS $XML_LIBS" + +- AS_ECHO("CPPFLAGS: $CPPFLAGS") +- AS_ECHO("LD_FLAGS: $LDFLAGS") ++ AS_ECHO("CPPFLAGS: $CPPFLAGS") ++ AS_ECHO("LD_FLAGS: $LDFLAGS") + +- AC_CHECK_LIB([xml2], [xmlTextReaderRead], [working_xml="yes"], [working_xml="no"], [$XML_LIBS]) ++ AC_CHECK_LIB([xml2], [xmlTextReaderRead], [working_xml="yes"], [working_xml="no"], [$XML_LIBS]) + +- CPPFLAGS="$save_CPPFLAGS" +- LDFLAGS="$save_LDFLAGS" +- LIBS="$save_LIBS" ++ CPPFLAGS="$save_CPPFLAGS" ++ LDFLAGS="$save_LDFLAGS" ++ LIBS="$save_LIBS" ++ fi + fi +-fi + +-if test "$working_xml" = "yes"; then +- AC_DEFINE([HAVE_LIBXML2],1,[Define to 1 if you have the 'libxml2' library (-lxml2).]) +- AC_SUBST(XML_CPPFLAGS) +- AC_SUBST(XML_LIBS) +- AC_MSG_NOTICE([Compiling and linking with libxml2 from $XML_HOME]) +-else +- if test "$want_xml" = "yes"; then +- AC_MSG_ERROR([****** Please install libxml2 packages!]) ++ if test "$working_xml" = "yes"; then ++ AC_DEFINE([HAVE_LIBXML2],1,[Define to 1 if you have the 'libxml2' library (-lxml2).]) ++ AC_SUBST(XML_CPPFLAGS) ++ AC_SUBST(XML_LIBS) ++ AC_MSG_NOTICE([Compiling and linking with libxml2 from $XML_HOME]) + else +- if test "$want_xml" != "no"; then ++ if test "$want_xml" = "yes"; then ++ AC_MSG_ERROR([****** Please install libxml2 packages!]) ++ else + AC_MSG_NOTICE([****** libxml2 support unavailable]) + fi ++ XML_CPPFLAGS="" ++ XML_LIBS="" ++ AC_SUBST(XML_CPPFLAGS) ++ AC_SUBST(XML_LIBS) + fi +- XML_CPPFLAGS="" +- XML_LIBS="" +- AC_SUBST(XML_CPPFLAGS) +- AC_SUBST(XML_LIBS) + fi + + AM_CONDITIONAL([HAVE_LIBXML2], test "x$HAVE_LIBXML2" = "xyes") +-- +2.21.0 + diff --git a/net/dnstop/Makefile b/net/dnstop/Makefile new file mode 100644 index 000000000..6681552db --- /dev/null +++ b/net/dnstop/Makefile @@ -0,0 +1,35 @@ +include $(TOPDIR)/rules.mk + +PKG_NAME:=dnstop +PKG_RELEASE:=1 + +PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/measurement-factory/dnstop.git +PKG_SOURCE_DATE:=2018-05-22 +PKG_SOURCE_VERSION:=a5a5d2e2ca9a433bb8f017682ac6f2085741bdf8 +PKG_MIRROR_HASH:=1fe443c6faf1726aeb86a53a3a44efce23cad604304036371e76ff020eb0dac2 + +PKG_MAINTAINER:=Ken Xu <windedge99@gmail.com> +PKG_LICENSE:=NLPL +PKG_LICENSE_FILES:=LICENSE + +include $(INCLUDE_DIR)/package.mk + +define Package/dnstop + SECTION:=net + CATEGORY:=Network + DEPENDS:=+libpcap +libncurses + TITLE:=stay on top of your dns traffic + URL:=http://dns.measurement-factory.com/tools/dnstop/ +endef + +define Package/dnstop/description + dnstop is a libpcap application (like tcpdump) that displays various tables of DNS traffic on your network +endef + +define Package/dnstop/install + $(INSTALL_DIR) $(1)/usr/bin + $(INSTALL_BIN) $(PKG_BUILD_DIR)/dnstop $(1)/usr/bin/ +endef + +$(eval $(call BuildPackage,dnstop)) diff --git a/net/dnstop/patches/100-fix_udphdr_error.patch b/net/dnstop/patches/100-fix_udphdr_error.patch new file mode 100644 index 000000000..2b729ea60 --- /dev/null +++ b/net/dnstop/patches/100-fix_udphdr_error.patch @@ -0,0 +1,14 @@ +--- a/dnstop.c ++++ b/dnstop.c +@@ -71,11 +71,6 @@ static const char *Version = "@VERSION@" + #define ETHERTYPE_IPV6 0x86DD + #endif + +-#if defined(__linux__) || defined(__GLIBC__) || defined(__GNU__) +-#define uh_dport dest +-#define uh_sport source +-#endif +- + typedef struct { + inX_addr src; + int count; diff --git a/net/fossil/Makefile 
b/net/fossil/Makefile deleted file mode 100644 index fcece2070..000000000 --- a/net/fossil/Makefile +++ /dev/null @@ -1,72 +0,0 @@ -# -# Copyright (C) 2015 OpenWrt.org -# -# This is free software, licensed under the GNU General Public License v2. -# See /LICENSE for more information. -# - -include $(TOPDIR)/rules.mk - -PKG_NAME:=fossil -PKG_VERSION:=1.34 -PKG_RELEASE:=2 - -PKG_LICENSE:=BSD-2-Clause -PKG_MAINTAINER:=Luka Perkov <luka@openwrt.org> - -PKG_SOURCE:=$(PKG_NAME)-src-$(PKG_VERSION).tar.gz -PKG_SOURCE_URL:=https://www.fossil-scm.org/index.html/uv/download -PKG_HASH:=53a6b83e878feced9ac7705f87e5b6ea82727314e3e19202ae1c46c7e4dba49f - -PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-src-$(PKG_VERSION) - -PKG_INSTALL:=1 -PKG_BUILD_PARALLEL:=1 - -include $(INCLUDE_DIR)/package.mk - -define Package/fossil - SECTION:=net - CATEGORY:=Network - SUBMENU:=Version Control Systems - TITLE:=Simple distributed software configuration management - URL:=http://www.fossil-scm.org - DEPENDS:=+zlib -endef - -define Package/fossil/description - Fossil is a distributed version control system, bug tracking system - and wiki software server for use in software development. -endef - -MAKE_FLAGS := \ - TCC="$(TARGET_CC)" \ - CFLAGS="$(TARGET_CFLAGS) -I$(STAGING_DIR)/usr/include -DFOSSIL_ENABLE_JSON" \ - LDFLAGS="$(TARGET_LDFLAGS) -Wl,-rpath=$(TOOLCHAIN_DIR)/lib -L$(STAGING_DIR)/lib -L$(STAGING_DIR)/usr/lib -lm" \ - -define Build/Configure -endef - -define Build/Compile - $(call Build/Compile/Default, \ - -f Makefile.classic $(MAKE_FLAGS) all \ - ) -endef - -define Build/Install -endef - -define Package/fossil/conffiles -/etc/config/fossil -endef - -define Package/fossil/install - $(INSTALL_DIR) $(1)/usr/bin - $(INSTALL_BIN) $(PKG_BUILD_DIR)/$(PKG_NAME) $(1)/usr/bin/ - $(INSTALL_DIR) $(1)/etc/init.d - $(INSTALL_BIN) ./files/fossil.init $(1)/etc/init.d/fossil - $(INSTALL_DIR) $(1)/etc/config - $(INSTALL_CONF) ./files/fossil.config $(1)/etc/config/fossil -endef - -$(eval $(call BuildPackage,fossil)) diff --git a/net/fossil/files/fossil.config b/net/fossil/files/fossil.config deleted file mode 100644 index f35876c68..000000000 --- a/net/fossil/files/fossil.config +++ /dev/null @@ -1,5 +0,0 @@ -config server 'dummy' - option repository '/tmp/fossil/dummy' - option port 8008 - option localhost 0 - option create 1 diff --git a/net/fossil/files/fossil.init b/net/fossil/files/fossil.init deleted file mode 100644 index f5a17b985..000000000 --- a/net/fossil/files/fossil.init +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/sh /etc/rc.common -# Copyright (C) 2015 OpenWrt.org - -START=90 -USE_PROCD=1 - -start_instance() { - local cfg="$1" - - config_get repository "$cfg" repository - if [ -z "$repository" ]; then - echo "repository is not defined in $1, skipping" - return - fi - - config_get_bool create "$cfg" create 0 - - if [ "$create" -eq 0 -a ! -f "$repository" ]; then - echo "in $1 create option is '$create' and repository '$repository' is not a regular file, skipping" - return - fi - - if [ "$create" -eq 1 -a ! -d `dirname $repository` ]; then - mkdir -p `dirname $repository` - if [ "$?" 
-ne 0 ]; then - echo "could not create directory, skipping" - return - fi - fi - - config_get port "$cfg" port "" - if [ -z "$port" ]; then - echo "port is not defined in $1, skipping" - return - fi - - config_get_bool debug "$cfg" debug 0 - config_get_bool localhost "$cfg" localhost 1 - config_get_bool scgi "$cfg" scgi 0 - - procd_open_instance - procd_set_param command /usr/bin/fossil server "$repository" --port $port - [ "$debug" -eq 1 ] && procd_append_param command --th-trace - [ "$create" -eq 1 ] && procd_append_param command --user root --create - [ "$localhost" -eq 1 ] && procd_append_param command --localhost - [ "$scgi" -eq 1 ] && procd_append_param command --scgi - procd_set_param respawn - procd_close_instance -} - -start_service() { - config_load 'fossil' - config_foreach start_instance 'server' -} diff --git a/net/fossil/patches/001-no_https.patch b/net/fossil/patches/001-no_https.patch deleted file mode 100644 index cd83ffa75..000000000 --- a/net/fossil/patches/001-no_https.patch +++ /dev/null @@ -1,22 +0,0 @@ ---- a/Makefile.classic -+++ b/Makefile.classic -@@ -41,9 +41,6 @@ - # FOSSIL_ENABLE_MINIZ = 1 - # TCC += -DFOSSIL_ENABLE_MINIZ - --# To add support for HTTPS --TCC += -DFOSSIL_ENABLE_SSL -- - #### We sometimes add the -static option here so that we can build a - # static executable that will run in a chroot jail. - #LIB = -static -@@ -60,9 +57,6 @@ - # If using zlib: - LIB += $(ZLIB_LIB.$(FOSSIL_ENABLE_MINIZ)) $(LDFLAGS) - --# If using HTTPS: --LIB += -lcrypto -lssl -- - #### Tcl shell for use in running the fossil testsuite. If you do not - # care about testing the end result, this can be blank. - # diff --git a/net/frp/Makefile b/net/frp/Makefile index 994305d35..4033ae0fa 100644 --- a/net/frp/Makefile +++ b/net/frp/Makefile @@ -1,12 +1,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=frp -PKG_VERSION:=0.27.0 -PKG_RELEASE:=2 +PKG_VERSION:=0.28.0 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/fatedier/frp/tar.gz/v${PKG_VERSION}? -PKG_HASH:=5d2efd5d924c7a7f84a9f2838de6ab9b7d5ca070ab243edd404a5ca80237607c +PKG_HASH:=61afbd0e84fc1ab92eacce5a642e2590d1b8c1a972a78f6499165c1778aa62cf PKG_MAINTAINER:=Richard Yu <yurichard3839@gmail.com> PKG_LICENSE:=Apache-2.0 diff --git a/net/frp/files/frpc.config b/net/frp/files/frpc.config index 06bcbb186..492e224ff 100644 --- a/net/frp/files/frpc.config +++ b/net/frp/files/frpc.config @@ -8,7 +8,7 @@ config init # https://github.com/fatedier/frp#configuration-file-template # list env 'ENV_NAME=value' # Config files include in temporary config file. -# list conf_inc '/etc/frp/frps.d/frpc_full.ini' +# list conf_inc '/etc/frp/frpc.d/frpc_full.ini' config conf 'common' option server_addr 127.0.0.1 diff --git a/net/geth/Makefile b/net/geth/Makefile index 0bca8f56d..fb8e1929e 100644 --- a/net/geth/Makefile +++ b/net/geth/Makefile @@ -8,15 +8,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=go-ethereum -PKG_VERSION:=1.8.27 -PKG_RELEASE:=2 +PKG_VERSION:=1.9.1 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/ethereum/go-ethereum/tar.gz/v${PKG_VERSION}? 
-PKG_HASH:=45d264106991d0e2a4c34ac5d6539fc9460c768fc70588ea38a25f467039ece8 +PKG_HASH:=7394ae0eeac4b2aafa4bd56eef18c077088770bbce0962b215607b44369a5430 PKG_MAINTAINER:=Mislav Novakovic <mislav.novakovic@sartura.hr> -PKG_LICENSE:=GPL-3 LGPL-3 +PKG_LICENSE:=GPL-3-or-later LGPL-3-or-later PKG_LICENSE_FILES:=COPYING COPYING.LESSER PKG_BUILD_DEPENDS:=golang/host diff --git a/net/geth/patches/010-crypto-bn256-cloudflare-pull-in-upstream-fix-for-Go-.patch b/net/geth/patches/010-crypto-bn256-cloudflare-pull-in-upstream-fix-for-Go-.patch deleted file mode 100644 index ee2b096fa..000000000 --- a/net/geth/patches/010-crypto-bn256-cloudflare-pull-in-upstream-fix-for-Go-.patch +++ /dev/null @@ -1,127 +0,0 @@ -From 39bd2609ca730b3b628003b3f938aed7d49132ab Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?P=C3=A9ter=20Szil=C3=A1gyi?= <peterke@gmail.com> -Date: Thu, 28 Feb 2019 14:53:44 +0200 -Subject: [PATCH] crypto/bn256/cloudflare: pull in upstream fix for Go 1.12 R18 - ---- - crypto/bn256/cloudflare/mul_arm64.h | 32 ++++++++++++++--------------- - 1 file changed, 16 insertions(+), 16 deletions(-) - -diff --git a/crypto/bn256/cloudflare/mul_arm64.h b/crypto/bn256/cloudflare/mul_arm64.h -index 75d52217311..d405eb8f728 100644 ---- a/crypto/bn256/cloudflare/mul_arm64.h -+++ b/crypto/bn256/cloudflare/mul_arm64.h -@@ -12,7 +12,7 @@ - UMULH R1, R8, c4 \ - ADCS ZR, c4 \ - \ -- MUL R2, R5, R25 \ -+ MUL R2, R5, R1 \ - UMULH R2, R5, R26 \ - MUL R2, R6, R0 \ - ADDS R0, R26 \ -@@ -24,13 +24,13 @@ - ADCS R0, R29 \ - UMULH R2, R8, c5 \ - ADCS ZR, c5 \ -- ADDS R25, c1 \ -+ ADDS R1, c1 \ - ADCS R26, c2 \ - ADCS R27, c3 \ - ADCS R29, c4 \ - ADCS ZR, c5 \ - \ -- MUL R3, R5, R25 \ -+ MUL R3, R5, R1 \ - UMULH R3, R5, R26 \ - MUL R3, R6, R0 \ - ADDS R0, R26 \ -@@ -42,13 +42,13 @@ - ADCS R0, R29 \ - UMULH R3, R8, c6 \ - ADCS ZR, c6 \ -- ADDS R25, c2 \ -+ ADDS R1, c2 \ - ADCS R26, c3 \ - ADCS R27, c4 \ - ADCS R29, c5 \ - ADCS ZR, c6 \ - \ -- MUL R4, R5, R25 \ -+ MUL R4, R5, R1 \ - UMULH R4, R5, R26 \ - MUL R4, R6, R0 \ - ADDS R0, R26 \ -@@ -60,7 +60,7 @@ - ADCS R0, R29 \ - UMULH R4, R8, c7 \ - ADCS ZR, c7 \ -- ADDS R25, c3 \ -+ ADDS R1, c3 \ - ADCS R26, c4 \ - ADCS R27, c5 \ - ADCS R29, c6 \ -@@ -69,15 +69,15 @@ - #define gfpReduce() \ - \ // m = (T * N') mod R, store m in R1:R2:R3:R4 - MOVD ·np+0(SB), R17 \ -- MOVD ·np+8(SB), R18 \ -+ MOVD ·np+8(SB), R25 \ - MOVD ·np+16(SB), R19 \ - MOVD ·np+24(SB), R20 \ - \ - MUL R9, R17, R1 \ - UMULH R9, R17, R2 \ -- MUL R9, R18, R0 \ -+ MUL R9, R25, R0 \ - ADDS R0, R2 \ -- UMULH R9, R18, R3 \ -+ UMULH R9, R25, R3 \ - MUL R9, R19, R0 \ - ADCS R0, R3 \ - UMULH R9, R19, R4 \ -@@ -86,9 +86,9 @@ - \ - MUL R10, R17, R21 \ - UMULH R10, R17, R22 \ -- MUL R10, R18, R0 \ -+ MUL R10, R25, R0 \ - ADDS R0, R22 \ -- UMULH R10, R18, R23 \ -+ UMULH R10, R25, R23 \ - MUL R10, R19, R0 \ - ADCS R0, R23 \ - ADDS R21, R2 \ -@@ -97,7 +97,7 @@ - \ - MUL R11, R17, R21 \ - UMULH R11, R17, R22 \ -- MUL R11, R18, R0 \ -+ MUL R11, R25, R0 \ - ADDS R0, R22 \ - ADDS R21, R3 \ - ADCS R22, R4 \ -@@ -107,19 +107,19 @@ - \ - \ // m * N - loadModulus(R5,R6,R7,R8) \ -- mul(R17,R18,R19,R20,R21,R22,R23,R24) \ -+ mul(R17,R25,R19,R20,R21,R22,R23,R24) \ - \ - \ // Add the 512-bit intermediate to m*N -- MOVD ZR, R25 \ -+ MOVD ZR, R0 \ - ADDS R9, R17 \ -- ADCS R10, R18 \ -+ ADCS R10, R25 \ - ADCS R11, R19 \ - ADCS R12, R20 \ - ADCS R13, R21 \ - ADCS R14, R22 \ - ADCS R15, R23 \ - ADCS R16, R24 \ -- ADCS ZR, R25 \ -+ ADCS ZR, R0 \ - \ - \ // Our output is R21:R22:R23:R24. Reduce mod p if necessary. 
- SUBS R5, R21, R10 \ diff --git a/net/gnunet-secushare/Makefile b/net/gnunet-secushare/Makefile index cf327488b..a4ed478bf 100644 --- a/net/gnunet-secushare/Makefile +++ b/net/gnunet-secushare/Makefile @@ -2,10 +2,10 @@ include $(TOPDIR)/rules.mk PKG_NAME:=gnunet-secushare -PKG_SOURCE_VERSION:=81939cb93670efcee8e99884d10d2676b02edba9 -PKG_SOURCE_DATE:=20190228 -PKG_MIRROR_HASH:=64a0fb7ad6a515559360de71df85dde152f55a60585668f15114bc1f55cf2742 -PKG_RELEASE:=3 +PKG_SOURCE_VERSION:=5fc42cc72b97b22a27d8d6622060a429f7fa9098 +PKG_SOURCE_DATE:=20190728 +PKG_MIRROR_HASH:=44106b73d7077ff8123d9972f6a1f746eca7a4d4e47a623d6576db80bdb97bad +PKG_RELEASE:=1 PKG_SOURCE_URL:=https://gnunet.org/git/gnunet-secushare.git PKG_SOURCE_PROTO:=git PKG_LICENSE:=GPL-3.0 diff --git a/net/gnunet/Makefile b/net/gnunet/Makefile index b59107538..2be414f4d 100644 --- a/net/gnunet/Makefile +++ b/net/gnunet/Makefile @@ -2,11 +2,11 @@ include $(TOPDIR)/rules.mk PKG_NAME:=gnunet -PKG_VERSION:=0.11.5 -PKG_RELEASE:=2 +PKG_VERSION:=0.11.6 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=@GNU/gnunet -PKG_HASH:=98e0355ff0627bf88112b3b92a7522e98c0ae6071fc45efda5a33daed28199b3 +PKG_HASH:=f8a07063b1e0890a1386fed2313a967f58f5495c075a25725aba200469c857bf PKG_LICENSE:=AGPL-3.0 PKG_LICENSE_FILES:=COPYING @@ -281,7 +281,7 @@ PLUGIN_dhtcache-heap:=datacache_heap CONFLICTS_dhtcache-heap:=gnunet-dhtcache-pgsql gnunet-dhtcache-sqlite DEPENDS_gns-flat:=+gnunet-gns -PLUGIN_gns-flat:=namecache_flat namestore_heap +PLUGIN_gns-flat:=namecache_flat namestore_flat PLUGIN_peerstore-flat:=peerstore_flat diff --git a/net/gnunet/files/gnunet-gns-flat.defaults b/net/gnunet/files/gnunet-gns-flat.defaults index f37228dd7..e2794b7a8 100644 --- a/net/gnunet/files/gnunet-gns-flat.defaults +++ b/net/gnunet/files/gnunet-gns-flat.defaults @@ -7,7 +7,7 @@ uci -q batch <<EOF del gnunet.namestore_heap set gnunet.namestore_heap=gnunet-config set gnunet.namestore_heap.FILENAME=/etc/gnunet/namestore.flat - set gnunet.namestore.DATABASE=heap + set gnunet.namestore.DATABASE=flat del gnunet.namecache_flat set gnunet.namecache_flat=gnunet-config set gnunet.namecache_flat.FILENAME=/var/run/gnunet/namecache.flat diff --git a/net/haproxy/Makefile b/net/haproxy/Makefile index 0c6cb0739..d31349c6d 100644 --- a/net/haproxy/Makefile +++ b/net/haproxy/Makefile @@ -11,7 +11,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=haproxy PKG_VERSION:=2.0.3 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.haproxy.org/download/2.0/src diff --git a/net/haproxy/patches/000-BUG-MEDIUM-protocols-add-a-global-lock-for-the-init-deinit-stuff.patch b/net/haproxy/patches/000-BUG-MEDIUM-protocols-add-a-global-lock-for-the-init-deinit-stuff.patch new file mode 100644 index 000000000..de10b7d06 --- /dev/null +++ b/net/haproxy/patches/000-BUG-MEDIUM-protocols-add-a-global-lock-for-the-init-deinit-stuff.patch @@ -0,0 +1,302 @@ +commit 937604b4cfccddd607b8d4883815c4e3f9ab70d0 +Author: Willy Tarreau <w@1wt.eu> +Date: Wed Jul 24 16:45:02 2019 +0200 + + BUG/MEDIUM: protocols: add a global lock for the init/deinit stuff + + Dragan Dosen found that the listeners lock is not sufficient to protect + the listeners list when proxies are stopping because the listeners are + also unlinked from the protocol list, and under certain situations like + bombing with soft-stop signals or shutting down many frontends in parallel + from multiple CLI connections, it could be possible to provoke multiple + instances of delete_listener() to be called 
in parallel for different + listeners, thus corrupting the protocol lists. + + Such operations are pretty rare, they are performed once per proxy upon + startup and once per proxy on shut down. Thus there is no point trying + to optimize anything and we can use a global lock to protect the protocol + lists during these manipulations. + + This fix (or a variant) will have to be backported as far as 1.8. + + (cherry picked from commit daacf3664506d56a1f3b050ccba504886a18b12a) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/include/proto/protocol.h b/include/proto/protocol.h +index 7bbebb8e..f25f77f0 100644 +--- a/include/proto/protocol.h ++++ b/include/proto/protocol.h +@@ -23,9 +23,11 @@ + #define _PROTO_PROTOCOL_H + + #include <sys/socket.h> ++#include <common/hathreads.h> + #include <types/protocol.h> + + extern struct protocol *__protocol_by_family[AF_CUST_MAX]; ++__decl_hathreads(extern HA_SPINLOCK_T proto_lock); + + /* Registers the protocol <proto> */ + void protocol_register(struct protocol *proto); +diff --git a/include/types/protocol.h b/include/types/protocol.h +index 1d3404b9..f38baeb9 100644 +--- a/include/types/protocol.h ++++ b/include/types/protocol.h +@@ -80,9 +80,9 @@ struct protocol { + int (*pause)(struct listener *l); /* temporarily pause this listener for a soft restart */ + void (*add)(struct listener *l, int port); /* add a listener for this protocol and port */ + +- struct list listeners; /* list of listeners using this protocol */ +- int nb_listeners; /* number of listeners */ +- struct list list; /* list of registered protocols */ ++ struct list listeners; /* list of listeners using this protocol (under proto_lock) */ ++ int nb_listeners; /* number of listeners (under proto_lock) */ ++ struct list list; /* list of registered protocols (under proto_lock) */ + }; + + #define CONNECT_HAS_DATA 0x00000001 /* There's data available to be sent */ +diff --git a/src/listener.c b/src/listener.c +index 40a774ed..b5fe2ac2 100644 +--- a/src/listener.c ++++ b/src/listener.c +@@ -433,6 +433,9 @@ static void limit_listener(struct listener *l, struct list *list) + * used as a protocol's generic enable_all() primitive, for use after the + * fork(). It puts the listeners into LI_READY or LI_FULL states depending on + * their number of connections. It always returns ERR_NONE. ++ * ++ * Must be called with proto_lock held. ++ * + */ + int enable_all_listeners(struct protocol *proto) + { +@@ -447,6 +450,9 @@ int enable_all_listeners(struct protocol *proto) + * the polling lists when they are in the LI_READY or LI_FULL states. It is + * intended to be used as a protocol's generic disable_all() primitive. It puts + * the listeners into LI_LISTEN, and always returns ERR_NONE. ++ * ++ * Must be called with proto_lock held. ++ * + */ + int disable_all_listeners(struct protocol *proto) + { +@@ -516,6 +522,9 @@ void unbind_listener_no_close(struct listener *listener) + /* This function closes all listening sockets bound to the protocol <proto>, + * and the listeners end in LI_ASSIGNED state if they were higher. It does not + * detach them from the protocol. It always returns ERR_NONE. ++ * ++ * Must be called with proto_lock held. ++ * + */ + int unbind_all_listeners(struct protocol *proto) + { +@@ -580,14 +589,19 @@ int create_listeners(struct bind_conf *bc, const struct sockaddr_storage *ss, + * number of listeners is updated, as well as the global number of listeners + * and jobs. Note that the listener must have previously been unbound. 
This + * is the generic function to use to remove a listener. ++ * ++ * Will grab the proto_lock. ++ * + */ + void delete_listener(struct listener *listener) + { + HA_SPIN_LOCK(LISTENER_LOCK, &listener->lock); + if (listener->state == LI_ASSIGNED) { + listener->state = LI_INIT; ++ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock); + LIST_DEL(&listener->proto_list); + listener->proto->nb_listeners--; ++ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock); + _HA_ATOMIC_SUB(&jobs, 1); + _HA_ATOMIC_SUB(&listeners, 1); + } +diff --git a/src/proto_sockpair.c b/src/proto_sockpair.c +index a4faa370..e7dd670d 100644 +--- a/src/proto_sockpair.c ++++ b/src/proto_sockpair.c +@@ -80,6 +80,9 @@ INITCALL1(STG_REGISTER, protocol_register, &proto_sockpair); + /* Add <listener> to the list of sockpair listeners (port is ignored). The + * listener's state is automatically updated from LI_INIT to LI_ASSIGNED. + * The number of listeners for the protocol is updated. ++ * ++ * Must be called with proto_lock held. ++ * + */ + static void sockpair_add_listener(struct listener *listener, int port) + { +@@ -97,6 +100,8 @@ static void sockpair_add_listener(struct listener *listener, int port) + * loose them across the fork(). A call to uxst_enable_listeners() is needed + * to complete initialization. + * ++ * Must be called with proto_lock held. ++ * + * The return value is composed from ERR_NONE, ERR_RETRYABLE and ERR_FATAL. + */ + static int sockpair_bind_listeners(struct protocol *proto, char *errmsg, int errlen) +diff --git a/src/proto_tcp.c b/src/proto_tcp.c +index 64ffb83c..bcbe27a7 100644 +--- a/src/proto_tcp.c ++++ b/src/proto_tcp.c +@@ -1103,6 +1103,9 @@ int tcp_bind_listener(struct listener *listener, char *errmsg, int errlen) + * The sockets will be registered but not added to any fd_set, in order not to + * loose them across the fork(). A call to enable_all_listeners() is needed + * to complete initialization. The return value is composed from ERR_*. ++ * ++ * Must be called with proto_lock held. ++ * + */ + static int tcp_bind_listeners(struct protocol *proto, char *errmsg, int errlen) + { +@@ -1121,6 +1124,9 @@ static int tcp_bind_listeners(struct protocol *proto, char *errmsg, int errlen) + /* Add <listener> to the list of tcpv4 listeners, on port <port>. The + * listener's state is automatically updated from LI_INIT to LI_ASSIGNED. + * The number of listeners for the protocol is updated. ++ * ++ * Must be called with proto_lock held. ++ * + */ + static void tcpv4_add_listener(struct listener *listener, int port) + { +@@ -1136,6 +1142,9 @@ static void tcpv4_add_listener(struct listener *listener, int port) + /* Add <listener> to the list of tcpv6 listeners, on port <port>. The + * listener's state is automatically updated from LI_INIT to LI_ASSIGNED. + * The number of listeners for the protocol is updated. ++ * ++ * Must be called with proto_lock held. ++ * + */ + static void tcpv6_add_listener(struct listener *listener, int port) + { +diff --git a/src/proto_uxst.c b/src/proto_uxst.c +index 66093af6..7263240f 100644 +--- a/src/proto_uxst.c ++++ b/src/proto_uxst.c +@@ -379,6 +379,9 @@ static int uxst_unbind_listener(struct listener *listener) + /* Add <listener> to the list of unix stream listeners (port is ignored). The + * listener's state is automatically updated from LI_INIT to LI_ASSIGNED. + * The number of listeners for the protocol is updated. ++ * ++ * Must be called with proto_lock held. 
++ * + */ + static void uxst_add_listener(struct listener *listener, int port) + { +@@ -594,6 +597,8 @@ static int uxst_connect_server(struct connection *conn, int flags) + * loose them across the fork(). A call to uxst_enable_listeners() is needed + * to complete initialization. + * ++ * Must be called with proto_lock held. ++ * + * The return value is composed from ERR_NONE, ERR_RETRYABLE and ERR_FATAL. + */ + static int uxst_bind_listeners(struct protocol *proto, char *errmsg, int errlen) +@@ -613,6 +618,9 @@ static int uxst_bind_listeners(struct protocol *proto, char *errmsg, int errlen) + /* This function stops all listening UNIX sockets bound to the protocol + * <proto>. It does not detaches them from the protocol. + * It always returns ERR_NONE. ++ * ++ * Must be called with proto_lock held. ++ * + */ + static int uxst_unbind_listeners(struct protocol *proto) + { +diff --git a/src/protocol.c b/src/protocol.c +index 96e01c82..ac45cf2e 100644 +--- a/src/protocol.c ++++ b/src/protocol.c +@@ -18,18 +18,26 @@ + #include <common/mini-clist.h> + #include <common/standard.h> + +-#include <types/protocol.h> ++#include <proto/protocol.h> + + /* List head of all registered protocols */ + static struct list protocols = LIST_HEAD_INIT(protocols); + struct protocol *__protocol_by_family[AF_CUST_MAX] = { }; + ++/* This is the global spinlock we may need to register/unregister listeners or ++ * protocols. Its main purpose is in fact to serialize the rare stop/deinit() ++ * phases. ++ */ ++__decl_spinlock(proto_lock); ++ + /* Registers the protocol <proto> */ + void protocol_register(struct protocol *proto) + { ++ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock); + LIST_ADDQ(&protocols, &proto->list); + if (proto->sock_domain >= 0 && proto->sock_domain < AF_CUST_MAX) + __protocol_by_family[proto->sock_domain] = proto; ++ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock); + } + + /* Unregisters the protocol <proto>. Note that all listeners must have +@@ -37,8 +45,10 @@ void protocol_register(struct protocol *proto) + */ + void protocol_unregister(struct protocol *proto) + { ++ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock); + LIST_DEL(&proto->list); + LIST_INIT(&proto->list); ++ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock); + } + + /* binds all listeners of all registered protocols. 
Returns a composition +@@ -50,6 +60,7 @@ int protocol_bind_all(char *errmsg, int errlen) + int err; + + err = 0; ++ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock); + list_for_each_entry(proto, &protocols, list) { + if (proto->bind_all) { + err |= proto->bind_all(proto, errmsg, errlen); +@@ -57,6 +68,7 @@ int protocol_bind_all(char *errmsg, int errlen) + break; + } + } ++ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock); + return err; + } + +@@ -71,11 +83,13 @@ int protocol_unbind_all(void) + int err; + + err = 0; ++ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock); + list_for_each_entry(proto, &protocols, list) { + if (proto->unbind_all) { + err |= proto->unbind_all(proto); + } + } ++ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock); + return err; + } + +@@ -89,11 +103,13 @@ int protocol_enable_all(void) + int err; + + err = 0; ++ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock); + list_for_each_entry(proto, &protocols, list) { + if (proto->enable_all) { + err |= proto->enable_all(proto); + } + } ++ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock); + return err; + } + +@@ -107,11 +123,13 @@ int protocol_disable_all(void) + int err; + + err = 0; ++ HA_SPIN_LOCK(PROTO_LOCK, &proto_lock); + list_for_each_entry(proto, &protocols, list) { + if (proto->disable_all) { + err |= proto->disable_all(proto); + } + } ++ HA_SPIN_UNLOCK(PROTO_LOCK, &proto_lock); + return err; + } + diff --git a/net/haproxy/patches/001-BUG-MINOR-proxy-always-lock-stop_proxy.patch b/net/haproxy/patches/001-BUG-MINOR-proxy-always-lock-stop_proxy.patch new file mode 100644 index 000000000..4f7a370ae --- /dev/null +++ b/net/haproxy/patches/001-BUG-MINOR-proxy-always-lock-stop_proxy.patch @@ -0,0 +1,64 @@ +commit 6d79cedaaa4a16b2f42d2bf2bc25772a51354e91 +Author: Willy Tarreau <w@1wt.eu> +Date: Wed Jul 24 17:42:44 2019 +0200 + + BUG/MINOR: proxy: always lock stop_proxy() + + There is one unprotected call to stop_proxy() from the manage_proxy() + task, so there is a single caller by definition, but there is also + another such call from the CLI's "shutdown frontend" parser. This + one does it under the proxy's lock but the first one doesn't use it. + Thus it is theorically possible to corrupt the list of listeners in a + proxy by issuing "shutdown frontend" and SIGUSR1 exactly at the same + time. While it sounds particularly contrived or stupid, it could + possibly happen with automated tools that would send actions via + various channels. This could cause the process to loop forever or + to crash and thus stop faster than expected. + + This might be backported as far as 1.8. + + (cherry picked from commit 3de3cd4d9761324b31d23eb2c4a9434ed33801b8) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/proxy.c b/src/proxy.c +index f669ebf1..ae761ead 100644 +--- a/src/proxy.c ++++ b/src/proxy.c +@@ -1258,13 +1258,16 @@ void zombify_proxy(struct proxy *p) + * to be called when going down in order to release the ports so that another + * process may bind to them. It must also be called on disabled proxies at the + * end of start-up. If all listeners are closed, the proxy is set to the +- * PR_STSTOPPED state. ++ * PR_STSTOPPED state. The function takes the proxy's lock so it's safe to ++ * call from multiple places. 
+ */ + void stop_proxy(struct proxy *p) + { + struct listener *l; + int nostop = 0; + ++ HA_SPIN_LOCK(PROXY_LOCK, &p->lock); ++ + list_for_each_entry(l, &p->conf.listeners, by_fe) { + if (l->options & LI_O_NOSTOP) { + HA_ATOMIC_ADD(&unstoppable_jobs, 1); +@@ -1278,6 +1281,8 @@ void stop_proxy(struct proxy *p) + } + if (!nostop) + p->state = PR_STSTOPPED; ++ ++ HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock); + } + + /* This function resumes listening on the specified proxy. It scans all of its +@@ -2110,10 +2115,7 @@ static int cli_parse_shutdown_frontend(char **args, char *payload, struct appctx + send_log(px, LOG_WARNING, "Proxy %s stopped (FE: %lld conns, BE: %lld conns).\n", + px->id, px->fe_counters.cum_conn, px->be_counters.cum_conn); + +- HA_SPIN_LOCK(PROXY_LOCK, &px->lock); + stop_proxy(px); +- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock); +- + return 1; + } + diff --git a/net/haproxy/patches/002-BUILD-threads-add-the-definition-of-PROTO_LOCK.patch b/net/haproxy/patches/002-BUILD-threads-add-the-definition-of-PROTO_LOCK.patch new file mode 100644 index 000000000..2c73ec848 --- /dev/null +++ b/net/haproxy/patches/002-BUILD-threads-add-the-definition-of-PROTO_LOCK.patch @@ -0,0 +1,33 @@ +commit a4ca26661f95a60974fb13a78b1a0c89f9c09ea9 +Author: Willy Tarreau <w@1wt.eu> +Date: Thu Jul 25 07:53:56 2019 +0200 + + BUILD: threads: add the definition of PROTO_LOCK + + This one was added by commit daacf3664 ("BUG/MEDIUM: protocols: add a + global lock for the init/deinit stuff") but I forgot to add it to the + include file, breaking DEBUG_THREAD. + + (cherry picked from commit d6e0c03384cab2c72fb6ab841420045108ea4e6f) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/include/common/hathreads.h b/include/common/hathreads.h +index a7c8dc93..b05215bd 100644 +--- a/include/common/hathreads.h ++++ b/include/common/hathreads.h +@@ -562,6 +562,7 @@ enum lock_label { + AUTH_LOCK, + LOGSRV_LOCK, + DICT_LOCK, ++ PROTO_LOCK, + OTHER_LOCK, + LOCK_LABELS + }; +@@ -679,6 +680,7 @@ static inline const char *lock_label(enum lock_label label) + case AUTH_LOCK: return "AUTH"; + case LOGSRV_LOCK: return "LOGSRV"; + case DICT_LOCK: return "DICT"; ++ case PROTO_LOCK: return "PROTO"; + case OTHER_LOCK: return "OTHER"; + case LOCK_LABELS: break; /* keep compiler happy */ + }; diff --git a/net/haproxy/patches/003-BUG-MEDIUM-lb-chash-Fix-the-realloc-when-the-number-of-nodes-is-increased.patch b/net/haproxy/patches/003-BUG-MEDIUM-lb-chash-Fix-the-realloc-when-the-number-of-nodes-is-increased.patch new file mode 100644 index 000000000..28b9fe0a5 --- /dev/null +++ b/net/haproxy/patches/003-BUG-MEDIUM-lb-chash-Fix-the-realloc-when-the-number-of-nodes-is-increased.patch @@ -0,0 +1,32 @@ +commit 974c6916ba2f7efc83193bb8c04e95294ca21112 +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Fri Jul 26 13:52:13 2019 +0200 + + BUG/MEDIUM: lb-chash: Fix the realloc() when the number of nodes is increased + + When the number of nodes is increased because the server weight is changed, the + nodes array must be realloc. But its new size is not correctly set. Only the + total number of nodes is used to set the new size. But it must also depends on + the size of a node. It must be the total nomber of nodes times the size of a + node. + + This issue was reported on Github (#189). + + This patch must be backported to all versions since the 1.6. 
+ + (cherry picked from commit 366ad86af72c455cc958943913cb2de20eefee71) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/lb_chash.c b/src/lb_chash.c +index a35351e9..0bf4e81a 100644 +--- a/src/lb_chash.c ++++ b/src/lb_chash.c +@@ -84,7 +84,7 @@ static inline void chash_queue_dequeue_srv(struct server *s) + * increased the weight beyond the original weight + */ + if (s->lb_nodes_tot < s->next_eweight) { +- struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight); ++ struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes)); + + if (new_nodes) { + unsigned int j; diff --git a/net/haproxy/patches/004-BUG-MEDIUM-streams-Dont-switch-the-SI-to-SI_ST_DIS-if-we-have-data-to-send.patch b/net/haproxy/patches/004-BUG-MEDIUM-streams-Dont-switch-the-SI-to-SI_ST_DIS-if-we-have-data-to-send.patch new file mode 100644 index 000000000..69e1d62bb --- /dev/null +++ b/net/haproxy/patches/004-BUG-MEDIUM-streams-Dont-switch-the-SI-to-SI_ST_DIS-if-we-have-data-to-send.patch @@ -0,0 +1,32 @@ +commit 21a796cb83c29ee276feb04649a1b18214bbdee0 +Author: Olivier Houchard <ohouchard@haproxy.com> +Date: Fri Jul 26 14:54:34 2019 +0200 + + BUG/MEDIUM: streams: Don't switch the SI to SI_ST_DIS if we have data to send. + + In sess_established(), don't immediately switch the backend stream_interface + to SI_ST_DIS if we only got a SHUTR. We may still have something to send, + ie if the request is a POST, and we should be switched to SI_ST8DIS later + when the shutw will happen. + + This should be backported to 2.0 and 1.9. + + (cherry picked from commit 7859526fd6ce7ea33e20b7e532b21aa2465cb11d) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/stream.c b/src/stream.c +index a5c5f45c..64875c80 100644 +--- a/src/stream.c ++++ b/src/stream.c +@@ -954,8 +954,9 @@ static void sess_establish(struct stream *s) + si_chk_rcv(si); + } + req->wex = TICK_ETERNITY; +- /* If we managed to get the whole response, switch to SI_ST_DIS now. */ +- if (rep->flags & CF_SHUTR) ++ /* If we managed to get the whole response, and we don't have anything ++ * left to send, or can't, switch to SI_ST_DIS now. */ ++ if (rep->flags & (CF_SHUTR | CF_SHUTW)) + si->state = SI_ST_DIS; + } + diff --git a/net/haproxy/patches/005-BUG-MINOR-log-make-sure-writev-is-not-interrupted-on-a-file-output.patch b/net/haproxy/patches/005-BUG-MINOR-log-make-sure-writev-is-not-interrupted-on-a-file-output.patch new file mode 100644 index 000000000..5c4bbcd35 --- /dev/null +++ b/net/haproxy/patches/005-BUG-MINOR-log-make-sure-writev-is-not-interrupted-on-a-file-output.patch @@ -0,0 +1,42 @@ +commit 487b38e86c08431bc5f48aac72c8d753ee23cb03 +Author: Willy Tarreau <w@1wt.eu> +Date: Fri Jul 26 15:10:39 2019 +0200 + + BUG/MINOR: log: make sure writev() is not interrupted on a file output + + Since 1.9 we support sending logs to various non-blocking outputs like + stdou/stderr or flies, by using writev() which guarantees that it only + returns after having written everything or nothing. However the syscall + may be interrupted while doing so, and this is visible when writing to + a tty during debug sessions, as some logs occasionally appear interleaved + if an xterm or SSH connection is not very fast. Performance here is not a + critical concern, log correctness is. Let's simply take the logger's lock + around the writev() call to prevent multiple senders from stepping onto + each other's toes. + + This may be backported to 2.0 and 1.9. 
+ + (cherry picked from commit 9fbcb7e2e9c32659ab11927394fec2e160be2d0b) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/log.c b/src/log.c +index ef999d13..99f185e4 100644 +--- a/src/log.c ++++ b/src/log.c +@@ -1672,8 +1672,15 @@ send: + iovec[7].iov_len = 1; + + if (logsrv->addr.ss_family == AF_UNSPEC) { +- /* the target is a direct file descriptor */ ++ /* the target is a direct file descriptor. While writev() guarantees ++ * to write everything, it doesn't guarantee that it will not be ++ * interrupted while doing so. This occasionally results in interleaved ++ * messages when the output is a tty, hence the lock. There's no real ++ * performance concern here for such type of output. ++ */ ++ HA_SPIN_LOCK(LOGSRV_LOCK, &logsrv->lock); + sent = writev(*plogfd, iovec, 8); ++ HA_SPIN_UNLOCK(LOGSRV_LOCK, &logsrv->lock); + } + else { + msghdr.msg_name = (struct sockaddr *)&logsrv->addr; diff --git a/net/haproxy/patches/006-DOC-improve-the-wording-in-CONTRIBUTING-about-how-to-document-a-bug-fix.patch b/net/haproxy/patches/006-DOC-improve-the-wording-in-CONTRIBUTING-about-how-to-document-a-bug-fix.patch new file mode 100644 index 000000000..e7c8c083a --- /dev/null +++ b/net/haproxy/patches/006-DOC-improve-the-wording-in-CONTRIBUTING-about-how-to-document-a-bug-fix.patch @@ -0,0 +1,101 @@ +commit 8de6badd32fb584d60733a6236113edba00f8701 +Author: Willy Tarreau <w@1wt.eu> +Date: Fri Jul 26 15:21:54 2019 +0200 + + DOC: improve the wording in CONTRIBUTING about how to document a bug fix + + Insufficiently described bug fixes are still too frequent. It's a real + pain to create each new maintenance release, as 3/4 of the time is spent + trying to guess what problem a patch fixes, which is already important + in order to decide whether to pick the fix or not, but is even more + capital in order to write understandable release notes. + + Christopher rightfully demands that a patch tagged "BUG" MUST ABSOLUTELY + describe the problem and why this problem is a bug. Describing the fix + is one thing but if the bug is unknown, why would there be a fix ? How + can a stable maintainer be convinced to take a fix if its author didn't + care about checking whether it was a real bug ? This patch tries to + explain a bit better what really needs to appear in the commit message + and how to describe a bug. + + To be backported to all relevant stable versions. + + (cherry picked from commit 41f638c1eb8167bb473a6c8811d7fd70d7c06e07) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/CONTRIBUTING b/CONTRIBUTING +index 0fcd921e..201e122d 100644 +--- a/CONTRIBUTING ++++ b/CONTRIBUTING +@@ -454,7 +454,18 @@ do not think about them anymore after a few patches. + + 11) Real commit messages please! + +- Please properly format your commit messages. To get an idea, just run ++ The commit message is how you're trying to convince a maintainer to adopt ++ your work and maintain it as long as possible. A dirty commit message almost ++ always comes with dirty code. Too short a commit message indicates that too ++ short an analysis was done and that side effects are extremely likely to be ++ encountered. It's the maintainer's job to decide to accept this work in its ++ current form or not, with the known constraints. 
Some patches which rework ++ architectural parts or fix sensitive bugs come with 20-30 lines of design ++ explanations, limitations, hypothesis or even doubts, and despite this it ++ happens when reading them 6 months later while trying to identify a bug that ++ developers still miss some information about corner cases. ++ ++ So please properly format your commit messages. To get an idea, just run + "git log" on the file you've just modified. Patches always have the format + of an e-mail made of a subject, a description and the actual patch. If you + are sending a patch as an e-mail formatted this way, it can quickly be +@@ -506,9 +517,17 @@ do not think about them anymore after a few patches. + + But in any case, it is important that there is a clean description of what + the patch does, the motivation for what it does, why it's the best way to do +- it, its impacts, and what it does not yet cover. Also, in HAProxy, like many +- projects which take a great care of maintaining stable branches, patches are +- reviewed later so that some of them can be backported to stable releases. ++ it, its impacts, and what it does not yet cover. And this is particularly ++ important for bugs. A patch tagged "BUG" must absolutely explain what the ++ problem is, why it is considered as a bug. Anybody, even non-developers, ++ should be able to tell whether or not a patch is likely to address an issue ++ they are facing. Indicating what the code will do after the fix doesn't help ++ if it does not say what problem is encountered without the patch. Note that ++ in some cases the bug is purely theorical and observed by reading the code. ++ In this case it's perfectly fine to provide an estimate about possible ++ effects. Also, in HAProxy, like many projects which take a great care of ++ maintaining stable branches, patches are reviewed later so that some of them ++ can be backported to stable releases. + + While reviewing hundreds of patches can seem cumbersome, with a proper + formatting of the subject line it actually becomes very easy. For example, +@@ -630,13 +649,23 @@ patch types include : + + - BUG fix for a bug. The severity of the bug should also be indicated + when known. Similarly, if a backport is needed to older versions, +- it should be indicated on the last line of the commit message. If +- the bug has been identified as a regression brought by a specific +- patch or version, this indication will be appreciated too. New +- maintenance releases are generally emitted when a few of these +- patches are merged. If the bug is a vulnerability for which a CVE +- identifier was assigned before you publish the fix, you can mention +- it in the commit message, it will help distro maintainers. ++ it should be indicated on the last line of the commit message. The ++ commit message MUST ABSOLUTELY describe the problem and its impact ++ to non-developers. Any user must be able to guess if this patch is ++ likely to fix a problem they are facing. Even if the bug was ++ discovered by accident while reading the code or running an ++ automated tool, it is mandatory to try to estimate what potential ++ issue it might cause and under what circumstances. There may even ++ be security implications sometimes so a minimum analysis is really ++ required. Also please think about stable maintainers who have to ++ build the release notes, they need to have enough input about the ++ bug's impact to explain it. 
If the bug has been identified as a ++ regression brought by a specific patch or version, this indication ++ will be appreciated too. New maintenance releases are generally ++ emitted when a few of these patches are merged. If the bug is a ++ vulnerability for which a CVE identifier was assigned before you ++ publish the fix, you can mention it in the commit message, it will ++ help distro maintainers. + + - CLEANUP code cleanup, silence of warnings, etc... theoretically no impact. + These patches will rarely be seen in stable branches, though they diff --git a/net/haproxy/patches/007-BUG-MINOR-hlua-htx-Reset-channels-analyzers-when-txn-done-is-called.patch b/net/haproxy/patches/007-BUG-MINOR-hlua-htx-Reset-channels-analyzers-when-txn-done-is-called.patch new file mode 100644 index 000000000..e4f045756 --- /dev/null +++ b/net/haproxy/patches/007-BUG-MINOR-hlua-htx-Reset-channels-analyzers-when-txn-done-is-called.patch @@ -0,0 +1,49 @@ +commit 72c692701ab4197f1f8ec7594b7e8ef5082b9d9e +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Fri Jul 26 16:40:24 2019 +0200 + + BUG/MINOR: hlua/htx: Reset channels analyzers when txn:done() is called + + For HTX streams, when txn:done() is called, the work is delegated to the + function http_reply_and_close(). But it is not enough. The channel's analyzers + must also be reset. Otherwise, some analyzers may still be called while + processing should be aborted. + + For instance, if the function is called from an http-request rules on the + frontend, request analyzers on the backend side are still called. So we may try + to add an header to the request, while this one was already reset. + + This patch must be backported to 2.0 and 1.9. + + (cherry picked from commit fe6a71b8e08234dbe03fbd2fa3017590681479df) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/hlua.c b/src/hlua.c +index 23d2aa04..f9d1d699 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -5996,8 +5996,12 @@ __LJMP static int hlua_txn_done(lua_State *L) + ic = &htxn->s->req; + oc = &htxn->s->res; + +- if (IS_HTX_STRM(htxn->s)) +- htx_reply_and_close(htxn->s, 0, NULL); ++ if (IS_HTX_STRM(htxn->s)) { ++ htxn->s->txn->status = 0; ++ http_reply_and_close(htxn->s, 0, NULL); ++ ic->analysers &= AN_REQ_FLT_END; ++ oc->analysers &= AN_RES_FLT_END; ++ } + else { + if (htxn->s->txn) { + /* HTTP mode, let's stay in sync with the stream */ +@@ -6031,6 +6035,9 @@ __LJMP static int hlua_txn_done(lua_State *L) + ic->analysers = 0; + } + ++ if (!(htxn->s->flags & SF_ERR_MASK)) // this is not really an error but it is ++ htxn->s->flags |= SF_ERR_LOCAL; // to mark that it comes from the proxy ++ + hlua->flags |= HLUA_STOP; + WILL_LJMP(hlua_done(L)); + return 0; diff --git a/net/haproxy/patches/008-BUG-MEDIUM-hlua-Check-the-calling-direction-in-lua-functions-of-the-HTTP-class.patch b/net/haproxy/patches/008-BUG-MEDIUM-hlua-Check-the-calling-direction-in-lua-functions-of-the-HTTP-class.patch new file mode 100644 index 000000000..b0a5f9fc4 --- /dev/null +++ b/net/haproxy/patches/008-BUG-MEDIUM-hlua-Check-the-calling-direction-in-lua-functions-of-the-HTTP-class.patch @@ -0,0 +1,201 @@ +commit dc2ee27c7a1908ca3157a10ad131f13644bcaea3 +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Fri Jul 26 16:17:01 2019 +0200 + + BUG/MEDIUM: hlua: Check the calling direction in lua functions of the HTTP class + + It is invalid to manipulate responses from http-request rules or to manipulate + requests from http-response rules. 
When http-request rules are evaluated, the + connection to server is not yet established, so there is no response at all. And + when http-response rules are evaluated, the request has already been sent to the + server. + + Now, the calling direction is checked. So functions "txn.http:req_*" can now + only be called from http-request rules and the functions "txn.http:res_*" can + only be called from http-response rules. + + This issue was reported on Github (#190). + + This patch must be backported to all versions since the 1.6. + + (cherry picked from commit 84a6d5bc217a418db8efc4e76a0a32860db2c608) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/hlua.c b/src/hlua.c +index f9d1d699..21351cd6 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -5346,6 +5346,9 @@ __LJMP static int hlua_http_req_get_headers(lua_State *L) + MAY_LJMP(check_args(L, 1, "req_get_headers")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + return hlua_http_get_headers(L, htxn, &htxn->s->txn->req); + } + +@@ -5356,6 +5359,9 @@ __LJMP static int hlua_http_res_get_headers(lua_State *L) + MAY_LJMP(check_args(L, 1, "res_get_headers")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_RES) ++ WILL_LJMP(lua_error(L)); ++ + return hlua_http_get_headers(L, htxn, &htxn->s->txn->rsp); + } + +@@ -5393,6 +5399,9 @@ __LJMP static int hlua_http_req_rep_hdr(lua_State *L) + MAY_LJMP(check_args(L, 4, "req_rep_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_HDR)); + } + +@@ -5403,6 +5412,9 @@ __LJMP static int hlua_http_res_rep_hdr(lua_State *L) + MAY_LJMP(check_args(L, 4, "res_rep_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_RES) ++ WILL_LJMP(lua_error(L)); ++ + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_HDR)); + } + +@@ -5413,6 +5425,9 @@ __LJMP static int hlua_http_req_rep_val(lua_State *L) + MAY_LJMP(check_args(L, 4, "req_rep_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_VAL)); + } + +@@ -5423,6 +5438,9 @@ __LJMP static int hlua_http_res_rep_val(lua_State *L) + MAY_LJMP(check_args(L, 4, "res_rep_val")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_RES) ++ WILL_LJMP(lua_error(L)); ++ + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_VAL)); + } + +@@ -5462,6 +5480,9 @@ __LJMP static int hlua_http_req_del_hdr(lua_State *L) + MAY_LJMP(check_args(L, 2, "req_del_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + return hlua_http_del_hdr(L, htxn, &htxn->s->txn->req); + } + +@@ -5469,9 +5490,12 @@ __LJMP static int hlua_http_res_del_hdr(lua_State *L) + { + struct hlua_txn *htxn; + +- MAY_LJMP(check_args(L, 2, "req_del_hdr")); ++ MAY_LJMP(check_args(L, 2, "res_del_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_RES) ++ WILL_LJMP(lua_error(L)); ++ + return hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp); + } + +@@ -5523,6 +5547,9 @@ __LJMP static int hlua_http_req_add_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "req_add_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir 
!= SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req); + } + +@@ -5533,6 +5560,9 @@ __LJMP static int hlua_http_res_add_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "res_add_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_RES) ++ WILL_LJMP(lua_error(L)); ++ + return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp); + } + +@@ -5543,6 +5573,9 @@ static int hlua_http_req_set_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "req_set_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + hlua_http_del_hdr(L, htxn, &htxn->s->txn->req); + return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req); + } +@@ -5554,6 +5587,9 @@ static int hlua_http_res_set_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "res_set_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + ++ if (htxn->dir != SMP_OPT_DIR_RES) ++ WILL_LJMP(lua_error(L)); ++ + hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp); + return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp); + } +@@ -5565,6 +5601,9 @@ static int hlua_http_req_set_meth(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + lua_pushboolean(L, http_replace_req_line(0, name, name_len, htxn->p, htxn->s) != -1); + return 1; + } +@@ -5576,6 +5615,9 @@ static int hlua_http_req_set_path(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + lua_pushboolean(L, http_replace_req_line(1, name, name_len, htxn->p, htxn->s) != -1); + return 1; + } +@@ -5587,6 +5629,9 @@ static int hlua_http_req_set_query(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + /* Check length. */ + if (name_len > trash.size - 1) { + lua_pushboolean(L, 0); +@@ -5611,6 +5656,9 @@ static int hlua_http_req_set_uri(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + ++ if (htxn->dir != SMP_OPT_DIR_REQ) ++ WILL_LJMP(lua_error(L)); ++ + lua_pushboolean(L, http_replace_req_line(3, name, name_len, htxn->p, htxn->s) != -1); + return 1; + } +@@ -5622,6 +5670,9 @@ static int hlua_http_res_set_status(lua_State *L) + unsigned int code = MAY_LJMP(luaL_checkinteger(L, 2)); + const char *reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL)); + ++ if (htxn->dir != SMP_OPT_DIR_RES) ++ WILL_LJMP(lua_error(L)); ++ + http_set_status(code, reason, htxn->s); + return 0; + } diff --git a/net/haproxy/patches/009-MINOR-hlua-Dont-set-request-analyzers-on-response-channel-for-lua-actions.patch b/net/haproxy/patches/009-MINOR-hlua-Dont-set-request-analyzers-on-response-channel-for-lua-actions.patch new file mode 100644 index 000000000..8d799d332 --- /dev/null +++ b/net/haproxy/patches/009-MINOR-hlua-Dont-set-request-analyzers-on-response-channel-for-lua-actions.patch @@ -0,0 +1,34 @@ +commit b22f6501bc9838061472128360e0e55d08cb0bd9 +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Fri Jul 26 14:54:52 2019 +0200 + + MINOR: hlua: Don't set request analyzers on response channel for lua actions + + Setting some requests analyzers on the response channel was an old trick to be + sure to re-evaluate the request's analyers after the response's ones have been + called. It is no more necessary. 
In fact, this trick was removed in the version + 1.8 and backported up to the version 1.6. + + This patch must be backported to all versions since 1.6 to ease the backports of + fixes on the lua code. + + (cherry picked from commit 51fa358432247fe5d7259d9d8a0e08d49d429c73) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/hlua.c b/src/hlua.c +index 21351cd6..36454cdc 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -6873,11 +6873,8 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px, + * is detected on a response channel. This is useful + * only for actions targeted on the requests. + */ +- if (HLUA_IS_WAKERESWR(s->hlua)) { ++ if (HLUA_IS_WAKERESWR(s->hlua)) + s->res.flags |= CF_WAKE_WRITE; +- if ((analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE))) +- s->res.analysers |= analyzer; +- } + if (HLUA_IS_WAKEREQWR(s->hlua)) + s->req.flags |= CF_WAKE_WRITE; + /* We can quit the function without consistency check diff --git a/net/haproxy/patches/010-MINOR-hlua-Add-a-flag-on-the-lua-txn-to-know-in-which-context-it-can-be-used.patch b/net/haproxy/patches/010-MINOR-hlua-Add-a-flag-on-the-lua-txn-to-know-in-which-context-it-can-be-used.patch new file mode 100644 index 000000000..89160e421 --- /dev/null +++ b/net/haproxy/patches/010-MINOR-hlua-Add-a-flag-on-the-lua-txn-to-know-in-which-context-it-can-be-used.patch @@ -0,0 +1,110 @@ +commit ff96b8bd3f85155f65b2b9c9f046fe3e40f630a4 +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Fri Jul 26 15:09:53 2019 +0200 + + MINOR: hlua: Add a flag on the lua txn to know in which context it can be used + + When a lua action or a lua sample fetch is called, a lua transaction is + created. It is an entry in the stack containing the class TXN. Thanks to it, we + can know the direction (request or response) of the call. But, for some + functions, it is also necessary to know if the buffer is "HTTP ready" for the + given direction. "HTTP ready" means there is a valid HTTP message in the + channel's buffer. So, when a lua action or a lua sample fetch is called, the + flag HLUA_TXN_HTTP_RDY is set if it is appropriate. + + (cherry picked from commit bfab2dddad3ded87617d1e2db54761943d1eb32d) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/include/types/hlua.h b/include/types/hlua.h +index 70c76852..2f4e38be 100644 +--- a/include/types/hlua.h ++++ b/include/types/hlua.h +@@ -43,7 +43,8 @@ struct stream; + #define HLUA_F_AS_STRING 0x01 + #define HLUA_F_MAY_USE_HTTP 0x02 + +-#define HLUA_TXN_NOTERM 0x00000001 ++#define HLUA_TXN_NOTERM 0x00000001 ++#define HLUA_TXN_HTTP_RDY 0x00000002 /* Set if the txn is HTTP ready for the defined direction */ + + #define HLUA_CONCAT_BLOCSZ 2048 + +diff --git a/src/hlua.c b/src/hlua.c +index 36454cdc..d37e3c61 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -6494,6 +6494,7 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp + struct stream *stream = smp->strm; + const char *error; + const struct buffer msg = { }; ++ unsigned int hflags = HLUA_TXN_NOTERM; + + if (!stream) + return 0; +@@ -6517,6 +6518,13 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp + + consistency_set(stream, smp->opt, &stream->hlua->cons); + ++ if (stream->be->mode == PR_MODE_HTTP) { ++ if ((smp->opt & SMP_OPT_DIR) == SMP_OPT_DIR_REQ) ++ hflags |= ((stream->txn->req.msg_state < HTTP_MSG_BODY) ? 0 : HLUA_TXN_HTTP_RDY); ++ else ++ hflags |= ((stream->txn->rsp.msg_state < HTTP_MSG_BODY) ? 
0 : HLUA_TXN_HTTP_RDY); ++ } ++ + /* If it is the first run, initialize the data for the call. */ + if (!HLUA_IS_RUNNING(stream->hlua)) { + +@@ -6541,8 +6549,7 @@ static int hlua_sample_fetch_wrapper(const struct arg *arg_p, struct sample *smp + lua_rawgeti(stream->hlua->T, LUA_REGISTRYINDEX, fcn->function_ref); + + /* push arguments in the stack. */ +- if (!hlua_txn_new(stream->hlua->T, stream, smp->px, smp->opt & SMP_OPT_DIR, +- HLUA_TXN_NOTERM)) { ++ if (!hlua_txn_new(stream->hlua->T, stream, smp->px, smp->opt & SMP_OPT_DIR, hflags)) { + SEND_ERR(smp->px, "Lua sample-fetch '%s': full stack.\n", fcn->name); + RESET_SAFE_LJMP(stream->hlua->T); + return 0; +@@ -6759,16 +6766,16 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px, + struct session *sess, struct stream *s, int flags) + { + char **arg; +- unsigned int analyzer; ++ unsigned int hflags = 0; + int dir; + const char *error; + const struct buffer msg = { }; + + switch (rule->from) { +- case ACT_F_TCP_REQ_CNT: analyzer = AN_REQ_INSPECT_FE ; dir = SMP_OPT_DIR_REQ; break; +- case ACT_F_TCP_RES_CNT: analyzer = AN_RES_INSPECT ; dir = SMP_OPT_DIR_RES; break; +- case ACT_F_HTTP_REQ: analyzer = AN_REQ_HTTP_PROCESS_FE; dir = SMP_OPT_DIR_REQ; break; +- case ACT_F_HTTP_RES: analyzer = AN_RES_HTTP_PROCESS_BE; dir = SMP_OPT_DIR_RES; break; ++ case ACT_F_TCP_REQ_CNT: ; dir = SMP_OPT_DIR_REQ; break; ++ case ACT_F_TCP_RES_CNT: ; dir = SMP_OPT_DIR_RES; break; ++ case ACT_F_HTTP_REQ: hflags = HLUA_TXN_HTTP_RDY ; dir = SMP_OPT_DIR_REQ; break; ++ case ACT_F_HTTP_RES: hflags = HLUA_TXN_HTTP_RDY ; dir = SMP_OPT_DIR_RES; break; + default: + SEND_ERR(px, "Lua: internal error while execute action.\n"); + return ACT_RET_CONT; +@@ -6821,7 +6828,7 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px, + lua_rawgeti(s->hlua->T, LUA_REGISTRYINDEX, rule->arg.hlua_rule->fcn.function_ref); + + /* Create and and push object stream in the stack. */ +- if (!hlua_txn_new(s->hlua->T, s, px, dir, 0)) { ++ if (!hlua_txn_new(s->hlua->T, s, px, dir, hflags)) { + SEND_ERR(px, "Lua function '%s': full stack.\n", + rule->arg.hlua_rule->fcn.name); + RESET_SAFE_LJMP(s->hlua->T); +@@ -6864,9 +6871,9 @@ static enum act_return hlua_action(struct act_rule *rule, struct proxy *px, + case HLUA_E_AGAIN: + /* Set timeout in the required channel. */ + if (s->hlua->wake_time != TICK_ETERNITY) { +- if (analyzer & (AN_REQ_INSPECT_FE|AN_REQ_HTTP_PROCESS_FE)) ++ if (dir & SMP_OPT_DIR_REQ) + s->req.analyse_exp = s->hlua->wake_time; +- else if (analyzer & (AN_RES_INSPECT|AN_RES_HTTP_PROCESS_BE)) ++ else + s->res.analyse_exp = s->hlua->wake_time; + } + /* Some actions can be wake up when a "write" event diff --git a/net/haproxy/patches/011-BUG-MINOR-hlua-Only-execute-functions-of-HTTP-class-if-the-txn-is-HTTP-ready.patch b/net/haproxy/patches/011-BUG-MINOR-hlua-Only-execute-functions-of-HTTP-class-if-the-txn-is-HTTP-ready.patch new file mode 100644 index 000000000..0ebb58e38 --- /dev/null +++ b/net/haproxy/patches/011-BUG-MINOR-hlua-Only-execute-functions-of-HTTP-class-if-the-txn-is-HTTP-ready.patch @@ -0,0 +1,180 @@ +commit 2351ca211d655c1be9ef6d62880899102134266d +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Fri Jul 26 16:31:34 2019 +0200 + + BUG/MINOR: hlua: Only execute functions of HTTP class if the txn is HTTP ready + + The flag HLUA_TXN_HTTP_RDY was added in the previous commit to know when a + function is called for a channel with a valid HTTP message or not. Of course it + also depends on the calling direction. 
In this commit, we allow the execution of + functions of the HTTP class only if this flag is set. + + Nobody seems to use them from an unsupported context (for instance, trying to + set an HTTP header from a tcp-request rule). But it remains a bug leading to + undefined behaviors or crashes. + + This patch may be backported to all versions since the 1.6. It depends on the + commits "MINOR: hlua: Add a flag on the lua txn to know in which context it can + be used" and "MINOR: hlua: Don't set request analyzers on response channel for + lua actions". + + (cherry picked from commit 301eff8e215d5dc7130e1ebacd7cf8da09a4f643) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/hlua.c b/src/hlua.c +index d37e3c61..4d92fa44 100644 +--- a/src/hlua.c ++++ b/src/hlua.c +@@ -5346,7 +5346,7 @@ __LJMP static int hlua_http_req_get_headers(lua_State *L) + MAY_LJMP(check_args(L, 1, "req_get_headers")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return hlua_http_get_headers(L, htxn, &htxn->s->txn->req); +@@ -5359,7 +5359,7 @@ __LJMP static int hlua_http_res_get_headers(lua_State *L) + MAY_LJMP(check_args(L, 1, "res_get_headers")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_RES) ++ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return hlua_http_get_headers(L, htxn, &htxn->s->txn->rsp); +@@ -5399,7 +5399,7 @@ __LJMP static int hlua_http_req_rep_hdr(lua_State *L) + MAY_LJMP(check_args(L, 4, "req_rep_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_HDR)); +@@ -5412,7 +5412,7 @@ __LJMP static int hlua_http_res_rep_hdr(lua_State *L) + MAY_LJMP(check_args(L, 4, "res_rep_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_RES) ++ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_HDR)); +@@ -5425,7 +5425,7 @@ __LJMP static int hlua_http_req_rep_val(lua_State *L) + MAY_LJMP(check_args(L, 4, "req_rep_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->req, ACT_HTTP_REPLACE_VAL)); +@@ -5438,7 +5438,7 @@ __LJMP static int hlua_http_res_rep_val(lua_State *L) + MAY_LJMP(check_args(L, 4, "res_rep_val")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_RES) ++ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return MAY_LJMP(hlua_http_rep_hdr(L, htxn, &htxn->s->txn->rsp, ACT_HTTP_REPLACE_VAL)); +@@ -5480,7 +5480,7 @@ __LJMP static int hlua_http_req_del_hdr(lua_State *L) + MAY_LJMP(check_args(L, 2, "req_del_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return hlua_http_del_hdr(L, htxn, &htxn->s->txn->req); +@@ -5493,7 +5493,7 @@ __LJMP static int 
hlua_http_res_del_hdr(lua_State *L) + MAY_LJMP(check_args(L, 2, "res_del_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_RES) ++ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp); +@@ -5547,7 +5547,7 @@ __LJMP static int hlua_http_req_add_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "req_add_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return hlua_http_add_hdr(L, htxn, &htxn->s->txn->req); +@@ -5560,7 +5560,7 @@ __LJMP static int hlua_http_res_add_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "res_add_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_RES) ++ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + return hlua_http_add_hdr(L, htxn, &htxn->s->txn->rsp); +@@ -5573,7 +5573,7 @@ static int hlua_http_req_set_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "req_set_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + hlua_http_del_hdr(L, htxn, &htxn->s->txn->req); +@@ -5587,7 +5587,7 @@ static int hlua_http_res_set_hdr(lua_State *L) + MAY_LJMP(check_args(L, 3, "res_set_hdr")); + htxn = MAY_LJMP(hlua_checkhttp(L, 1)); + +- if (htxn->dir != SMP_OPT_DIR_RES) ++ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + hlua_http_del_hdr(L, htxn, &htxn->s->txn->rsp); +@@ -5601,7 +5601,7 @@ static int hlua_http_req_set_meth(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + lua_pushboolean(L, http_replace_req_line(0, name, name_len, htxn->p, htxn->s) != -1); +@@ -5615,7 +5615,7 @@ static int hlua_http_req_set_path(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + lua_pushboolean(L, http_replace_req_line(1, name, name_len, htxn->p, htxn->s) != -1); +@@ -5629,7 +5629,7 @@ static int hlua_http_req_set_query(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + /* Check length. 
*/ +@@ -5656,7 +5656,7 @@ static int hlua_http_req_set_uri(lua_State *L) + size_t name_len; + const char *name = MAY_LJMP(luaL_checklstring(L, 2, &name_len)); + +- if (htxn->dir != SMP_OPT_DIR_REQ) ++ if (htxn->dir != SMP_OPT_DIR_REQ || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + lua_pushboolean(L, http_replace_req_line(3, name, name_len, htxn->p, htxn->s) != -1); +@@ -5670,7 +5670,7 @@ static int hlua_http_res_set_status(lua_State *L) + unsigned int code = MAY_LJMP(luaL_checkinteger(L, 2)); + const char *reason = MAY_LJMP(luaL_optlstring(L, 3, NULL, NULL)); + +- if (htxn->dir != SMP_OPT_DIR_RES) ++ if (htxn->dir != SMP_OPT_DIR_RES || !(htxn->flags & HLUA_TXN_HTTP_RDY)) + WILL_LJMP(lua_error(L)); + + http_set_status(code, reason, htxn->s); diff --git a/net/haproxy/patches/012-BUG-MINOR-htx-Fix-free-space-addresses-calculation-during-a-block-expansion.patch b/net/haproxy/patches/012-BUG-MINOR-htx-Fix-free-space-addresses-calculation-during-a-block-expansion.patch new file mode 100644 index 000000000..3b43d72fc --- /dev/null +++ b/net/haproxy/patches/012-BUG-MINOR-htx-Fix-free-space-addresses-calculation-during-a-block-expansion.patch @@ -0,0 +1,37 @@ +commit 3cd7a1ea5110fc6a92627aaad06553a49723ac92 +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Mon Jul 29 10:50:28 2019 +0200 + + BUG/MINOR: htx: Fix free space addresses calculation during a block expansion + + When the payload of a block is shrinked or enlarged, addresses of the free + spaces must be updated. There are many possible cases. One of them is + buggy. When there is only one block in the HTX message and its payload is just + before the tail room and it needs to be moved in the head room to be enlarged, + addresses are not correctly updated. This bug may be hit by the compression + filter. + + This patch must be backported to 2.0. + + (cherry picked from commit 61ed7797f6440ee1102576365553650b1982a233) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/htx.c b/src/htx.c +index c29a66d7..cd21050c 100644 +--- a/src/htx.c ++++ b/src/htx.c +@@ -252,11 +252,13 @@ static int htx_prepare_blk_expansion(struct htx *htx, struct htx_blk *blk, int32 + ret = 1; + } + else if ((sz + delta) < headroom) { ++ uint32_t oldaddr = blk->addr; ++ + /* Move the block's payload into the headroom */ + blk->addr = htx->head_addr; + htx->tail_addr -= sz; + htx->head_addr += sz + delta; +- if (blk->addr == htx->end_addr) { ++ if (oldaddr == htx->end_addr) { + if (htx->end_addr == htx->tail_addr) { + htx->tail_addr = htx->head_addr; + htx->head_addr = htx->end_addr = 0; diff --git a/net/haproxy/patches/013-BUG-MAJOR-queue-threads-avoid-an-AB-BA-locking-issue-in-process_srv_queue.patch b/net/haproxy/patches/013-BUG-MAJOR-queue-threads-avoid-an-AB-BA-locking-issue-in-process_srv_queue.patch new file mode 100644 index 000000000..78e040c38 --- /dev/null +++ b/net/haproxy/patches/013-BUG-MAJOR-queue-threads-avoid-an-AB-BA-locking-issue-in-process_srv_queue.patch @@ -0,0 +1,225 @@ +commit 0ff395c154ad827c0c30eefc9371ba7f7c171027 +Author: Willy Tarreau <w@1wt.eu> +Date: Tue Jul 30 11:59:34 2019 +0200 + + BUG/MAJOR: queue/threads: avoid an AB/BA locking issue in process_srv_queue() + + A problem involving server slowstart was reported by @max2k1 in issue #197. + The problem is that pendconn_grab_from_px() takes the proxy lock while + already under the server's lock while process_srv_queue() first takes the + proxy's lock then the server's lock. 
+ + While the latter seems more natural, it is fundamentally incompatible with + mayn other operations performed on servers, namely state change propagation, + where the proxy is only known after the server and cannot be locked around + the servers. Howwever reversing the lock in process_srv_queue() is trivial + and only the few functions related to dynamic cookies need to be adjusted + for this so that the proxy's lock is taken for each server operation. This + is possible because the proxy's server list is built once at boot time and + remains stable. So this is what this patch does. + + The comments in the proxy and server structs were updated to mention this + rule that the server's lock may not be taken under the proxy's lock but + may enclose it. + + Another approach could consist in using a second lock for the proxy's queue + which would be different from the regular proxy's lock, but given that the + operations above are rare and operate on small servers list, there is no + reason for overdesigning a solution. + + This fix was successfully tested with 10000 servers in a backend where + adjusting the dyncookies in loops over the CLI didn't have a measurable + impact on the traffic. + + The only workaround without the fix is to disable any occurrence of + "slowstart" on server lines, or to disable threads using "nbthread 1". + + This must be backported as far as 1.8. + + (cherry picked from commit 5e83d996cf965ee5ac625f702a446f4d8c80a220) + Signed-off-by: Willy Tarreau <w@1wt.eu> + +diff --git a/include/types/proxy.h b/include/types/proxy.h +index ca24dbfe..2518f88d 100644 +--- a/include/types/proxy.h ++++ b/include/types/proxy.h +@@ -487,7 +487,7 @@ struct proxy { + * name is used + */ + struct list filter_configs; /* list of the filters that are declared on this proxy */ +- __decl_hathreads(HA_SPINLOCK_T lock); ++ __decl_hathreads(HA_SPINLOCK_T lock); /* may be taken under the server's lock */ + }; + + struct switching_rule { +diff --git a/include/types/server.h b/include/types/server.h +index 4a077268..e0534162 100644 +--- a/include/types/server.h ++++ b/include/types/server.h +@@ -319,7 +319,7 @@ struct server { + } ssl_ctx; + #endif + struct dns_srvrq *srvrq; /* Pointer representing the DNS SRV requeest, if any */ +- __decl_hathreads(HA_SPINLOCK_T lock); ++ __decl_hathreads(HA_SPINLOCK_T lock); /* may enclose the proxy's lock, must not be taken under */ + struct { + const char *file; /* file where the section appears */ + struct eb32_node id; /* place in the tree of used IDs */ +diff --git a/src/proxy.c b/src/proxy.c +index ae761ead..a537e0b1 100644 +--- a/src/proxy.c ++++ b/src/proxy.c +@@ -1940,9 +1940,12 @@ static int cli_parse_enable_dyncookie_backend(char **args, char *payload, struct + if (!px) + return 1; + ++ /* Note: this lock is to make sure this doesn't change while another ++ * thread is in srv_set_dyncookie(). ++ */ + HA_SPIN_LOCK(PROXY_LOCK, &px->lock); +- + px->ck_opts |= PR_CK_DYNAMIC; ++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock); + + for (s = px->srv; s != NULL; s = s->next) { + HA_SPIN_LOCK(SERVER_LOCK, &s->lock); +@@ -1950,8 +1953,6 @@ static int cli_parse_enable_dyncookie_backend(char **args, char *payload, struct + HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock); + } + +- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock); +- + return 1; + } + +@@ -1971,9 +1972,12 @@ static int cli_parse_disable_dyncookie_backend(char **args, char *payload, struc + if (!px) + return 1; + ++ /* Note: this lock is to make sure this doesn't change while another ++ * thread is in srv_set_dyncookie(). 
++ */ + HA_SPIN_LOCK(PROXY_LOCK, &px->lock); +- + px->ck_opts &= ~PR_CK_DYNAMIC; ++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock); + + for (s = px->srv; s != NULL; s = s->next) { + HA_SPIN_LOCK(SERVER_LOCK, &s->lock); +@@ -1984,8 +1988,6 @@ static int cli_parse_disable_dyncookie_backend(char **args, char *payload, struc + HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock); + } + +- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock); +- + return 1; + } + +@@ -2021,10 +2023,13 @@ static int cli_parse_set_dyncookie_key_backend(char **args, char *payload, struc + return 1; + } + ++ /* Note: this lock is to make sure this doesn't change while another ++ * thread is in srv_set_dyncookie(). ++ */ + HA_SPIN_LOCK(PROXY_LOCK, &px->lock); +- + free(px->dyncookie_key); + px->dyncookie_key = newkey; ++ HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock); + + for (s = px->srv; s != NULL; s = s->next) { + HA_SPIN_LOCK(SERVER_LOCK, &s->lock); +@@ -2032,8 +2037,6 @@ static int cli_parse_set_dyncookie_key_backend(char **args, char *payload, struc + HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock); + } + +- HA_SPIN_UNLOCK(PROXY_LOCK, &px->lock); +- + return 1; + } + +diff --git a/src/queue.c b/src/queue.c +index f4a94530..6aa54170 100644 +--- a/src/queue.c ++++ b/src/queue.c +@@ -312,16 +312,16 @@ void process_srv_queue(struct server *s) + struct proxy *p = s->proxy; + int maxconn; + +- HA_SPIN_LOCK(PROXY_LOCK, &p->lock); + HA_SPIN_LOCK(SERVER_LOCK, &s->lock); ++ HA_SPIN_LOCK(PROXY_LOCK, &p->lock); + maxconn = srv_dynamic_maxconn(s); + while (s->served < maxconn) { + int ret = pendconn_process_next_strm(s, p); + if (!ret) + break; + } +- HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock); + HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock); ++ HA_SPIN_UNLOCK(SERVER_LOCK, &s->lock); + } + + /* Adds the stream <strm> to the pending connection queue of server <strm>->srv +@@ -424,7 +424,8 @@ int pendconn_redistribute(struct server *s) + /* Check for pending connections at the backend, and assign some of them to + * the server coming up. The server's weight is checked before being assigned + * connections it may not be able to handle. The total number of transferred +- * connections is returned. ++ * connections is returned. It must be called with the server lock held, and ++ * will take the proxy's lock. + */ + int pendconn_grab_from_px(struct server *s) + { +diff --git a/src/server.c b/src/server.c +index a96f1ef6..236d6bae 100644 +--- a/src/server.c ++++ b/src/server.c +@@ -125,7 +125,7 @@ static inline void srv_check_for_dup_dyncookie(struct server *s) + } + + /* +- * Must be called with the server lock held. ++ * Must be called with the server lock held, and will grab the proxy lock. + */ + void srv_set_dyncookie(struct server *s) + { +@@ -137,15 +137,17 @@ void srv_set_dyncookie(struct server *s) + int addr_len; + int port; + ++ HA_SPIN_LOCK(PROXY_LOCK, &p->lock); ++ + if ((s->flags & SRV_F_COOKIESET) || + !(s->proxy->ck_opts & PR_CK_DYNAMIC) || + s->proxy->dyncookie_key == NULL) +- return; ++ goto out; + key_len = strlen(p->dyncookie_key); + + if (s->addr.ss_family != AF_INET && + s->addr.ss_family != AF_INET6) +- return; ++ goto out; + /* + * Buffer to calculate the cookie value. 
+ * The buffer contains the secret key + the server IP address +@@ -174,7 +176,7 @@ void srv_set_dyncookie(struct server *s) + hash_value = XXH64(tmpbuf, buffer_len, 0); + memprintf(&s->cookie, "%016llx", hash_value); + if (!s->cookie) +- return; ++ goto out; + s->cklen = 16; + + /* Don't bother checking if the dyncookie is duplicated if +@@ -183,6 +185,8 @@ void srv_set_dyncookie(struct server *s) + */ + if (!(s->next_admin & SRV_ADMF_FMAINT)) + srv_check_for_dup_dyncookie(s); ++ out: ++ HA_SPIN_UNLOCK(PROXY_LOCK, &p->lock); + } + + /* diff --git a/net/haproxy/patches/014-BUG-MINOR-debug-fix-a-small-race-in-the-thread-dumping-code.patch b/net/haproxy/patches/014-BUG-MINOR-debug-fix-a-small-race-in-the-thread-dumping-code.patch new file mode 100644 index 000000000..72dca16fd --- /dev/null +++ b/net/haproxy/patches/014-BUG-MINOR-debug-fix-a-small-race-in-the-thread-dumping-code.patch @@ -0,0 +1,71 @@ +commit da767eaaf6128eccd349a54ec6eac2a68dcacacb +Author: Willy Tarreau <w@1wt.eu> +Date: Wed Jul 31 19:15:45 2019 +0200 + + BUG/MINOR: debug: fix a small race in the thread dumping code + + If a thread dump is requested from a signal handler, it may interrupt + a thread already waiting for a dump to complete, and may see the + threads_to_dump variable go to zero while others are waiting, steal + the lock and prevent other threads from ever completing. This tends + to happen when dumping many threads upon a watchdog timeout, to threads + waiting for their turn. + + Instead now we proceed in two steps : + 1) the last dumped thread sets all bits again + 2) all threads only wait for their own bit to appear, then clear it + and quit + + This way there's no risk that a bit performs a double flip in the same + loop and threads cannot get stuck here anymore. + + This should be backported to 2.0 as it clarifies stack traces. + + (cherry picked from commit c07736209db764fb2aef6f18ed3687a504c35771) + Signed-off-by: Willy Tarreau <w@1wt.eu> + +diff --git a/src/debug.c b/src/debug.c +index 059bc6b9..07624ca5 100644 +--- a/src/debug.c ++++ b/src/debug.c +@@ -440,8 +440,8 @@ void debug_handler(int sig, siginfo_t *si, void *arg) + * 1- wait for our turn, i.e. when all lower bits are gone. + * 2- perform the action if our bit is set + * 3- remove our bit to let the next one go, unless we're +- * the last one and have to put them all but ours +- * 4- wait for zero and clear our bit if it's set ++ * the last one and have to put them all as a signal ++ * 4- wait out bit to re-appear, then clear it and quit. + */ + + /* wait for all previous threads to finish first */ +@@ -454,7 +454,7 @@ void debug_handler(int sig, siginfo_t *si, void *arg) + ha_thread_dump(thread_dump_buffer, tid, thread_dump_tid); + if ((threads_to_dump & all_threads_mask) == tid_bit) { + /* last one */ +- HA_ATOMIC_STORE(&threads_to_dump, all_threads_mask & ~tid_bit); ++ HA_ATOMIC_STORE(&threads_to_dump, all_threads_mask); + thread_dump_buffer = NULL; + } + else +@@ -462,14 +462,13 @@ void debug_handler(int sig, siginfo_t *si, void *arg) + } + + /* now wait for all others to finish dumping. The last one will set all +- * bits again to broadcast the leaving condition. ++ * bits again to broadcast the leaving condition so we'll see ourselves ++ * present again. This way the threads_to_dump variable never passes to ++ * zero until all visitors have stopped waiting. 
+ */ +- while (threads_to_dump & all_threads_mask) { +- if (threads_to_dump & tid_bit) +- HA_ATOMIC_AND(&threads_to_dump, ~tid_bit); +- else +- ha_thread_relax(); +- } ++ while (!(threads_to_dump & tid_bit)) ++ ha_thread_relax(); ++ HA_ATOMIC_AND(&threads_to_dump, ~tid_bit); + + /* mark the current thread as stuck to detect it upon next invocation + * if it didn't move. diff --git a/net/haproxy/patches/015-MINOR-wdt-also-consider-that-waiting-in-the-thread-dumper-is-normal.patch b/net/haproxy/patches/015-MINOR-wdt-also-consider-that-waiting-in-the-thread-dumper-is-normal.patch new file mode 100644 index 000000000..07a1eec72 --- /dev/null +++ b/net/haproxy/patches/015-MINOR-wdt-also-consider-that-waiting-in-the-thread-dumper-is-normal.patch @@ -0,0 +1,70 @@ +commit 445b2b7c52a13678241a190c4ff52e77a09ef0a6 +Author: Willy Tarreau <w@1wt.eu> +Date: Wed Jul 31 19:20:39 2019 +0200 + + MINOR: wdt: also consider that waiting in the thread dumper is normal + + It happens that upon looping threads the watchdog fires, starts a dump, + and other threads expire their budget while waiting for the other threads + to get dumped and trigger a watchdog event again, adding some confusion + to the traces. With this patch the situation becomes clearer as we export + the list of threads being dumped so that the watchdog can check it before + deciding to trigger. This way such threads in queue for being dumped are + not attempted to be reported in turn. + + This should be backported to 2.0 as it helps understand stack traces. + + (cherry picked from commit a37cb1880c81b1f038e575d88ba7210aea0b7b8f) + Signed-off-by: Willy Tarreau <w@1wt.eu> + +diff --git a/include/common/debug.h b/include/common/debug.h +index 333203dd..f43258e9 100644 +--- a/include/common/debug.h ++++ b/include/common/debug.h +@@ -70,6 +70,7 @@ + + struct task; + struct buffer; ++extern volatile unsigned long threads_to_dump; + void ha_task_dump(struct buffer *buf, const struct task *task, const char *pfx); + void ha_thread_dump(struct buffer *buf, int thr, int calling_tid); + void ha_thread_dump_all_to_trash(); +diff --git a/src/debug.c b/src/debug.c +index 07624ca5..3077e97c 100644 +--- a/src/debug.c ++++ b/src/debug.c +@@ -29,6 +29,11 @@ + #include <proto/stream_interface.h> + #include <proto/task.h> + ++/* mask of threads still having to dump, used to respect ordering. Only used ++ * when USE_THREAD_DUMP is set. ++ */ ++volatile unsigned long threads_to_dump = 0; ++ + /* Dumps to the buffer some known information for the desired thread, and + * optionally extra info for the current thread. The dump will be appended to + * the buffer, so the caller is responsible for preliminary initializing it. 
+@@ -405,9 +410,6 @@ void ha_thread_dump_all_to_trash() + */ + #define DEBUGSIG SIGURG + +-/* mask of threads still having to dump, used to respect ordering */ +-static volatile unsigned long threads_to_dump; +- + /* ID of the thread requesting the dump */ + static unsigned int thread_dump_tid; + +diff --git a/src/wdt.c b/src/wdt.c +index 19d36c34..aa89fd44 100644 +--- a/src/wdt.c ++++ b/src/wdt.c +@@ -75,7 +75,7 @@ void wdt_handler(int sig, siginfo_t *si, void *arg) + if (n - p < 1000000000UL) + goto update_and_leave; + +- if ((threads_harmless_mask|sleeping_thread_mask) & (1UL << thr)) { ++ if ((threads_harmless_mask|sleeping_thread_mask|threads_to_dump) & (1UL << thr)) { + /* This thread is currently doing exactly nothing + * waiting in the poll loop (unlikely but possible), + * waiting for all other threads to join the rendez-vous diff --git a/net/haproxy/patches/016-BUG-MEDIUM-lb-chash-Ensure-the-tree-integrity-when-server-weight-is-increased.patch b/net/haproxy/patches/016-BUG-MEDIUM-lb-chash-Ensure-the-tree-integrity-when-server-weight-is-increased.patch new file mode 100644 index 000000000..0026b8598 --- /dev/null +++ b/net/haproxy/patches/016-BUG-MEDIUM-lb-chash-Ensure-the-tree-integrity-when-server-weight-is-increased.patch @@ -0,0 +1,56 @@ +commit 0fc2d46fabb2b9317daf7030162e828c7e1684d5 +Author: Christopher Faulet <cfaulet@haproxy.com> +Date: Thu Aug 1 10:09:29 2019 +0200 + + BUG/MEDIUM: lb-chash: Ensure the tree integrity when server weight is increased + + When the server weight is increased in consistant hash, extra nodes have to be + allocated. So a realloc() is performed on the nodes array of the server. the + previous commit 962ea7732 ("BUG/MEDIUM: lb-chash: Remove all server's entries + before realloc() to re-insert them after") have fixed the size used during the + realloc() to avoid segfaults. But another bug remains. After the realloc(), the + memory area allocated for the nodes array may change, invalidating all node + addresses in the chash tree. + + So, to fix the bug, we must remove all server's entries from the chash tree + before the realloc to insert all of them after, old nodes and new ones. The + insert will be automatically handled by the loop at the end of the function + chash_queue_dequeue_srv(). + + Note that if the call to realloc() failed, no new entries will be created for + the server, so the effective server weight will be unchanged. + + This issue was reported on Github (#189). + + This patch must be backported to all versions since the 1.6. 
+ + (cherry picked from commit 0a52c17f819a5b0a17718b605bdd990b9e2b58e6) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/lb_chash.c b/src/lb_chash.c +index 0bf4e81a..23448df8 100644 +--- a/src/lb_chash.c ++++ b/src/lb_chash.c +@@ -84,8 +84,13 @@ static inline void chash_queue_dequeue_srv(struct server *s) + * increased the weight beyond the original weight + */ + if (s->lb_nodes_tot < s->next_eweight) { +- struct tree_occ *new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes)); ++ struct tree_occ *new_nodes; + ++ /* First we need to remove all server's entries from its tree ++ * because the realloc will change all nodes pointers */ ++ chash_dequeue_srv(s); ++ ++ new_nodes = realloc(s->lb_nodes, s->next_eweight * sizeof(*new_nodes)); + if (new_nodes) { + unsigned int j; + +@@ -494,7 +499,6 @@ void chash_init_server_tree(struct proxy *p) + srv->lb_nodes_tot = srv->uweight * BE_WEIGHT_SCALE; + srv->lb_nodes_now = 0; + srv->lb_nodes = calloc(srv->lb_nodes_tot, sizeof(struct tree_occ)); +- + for (node = 0; node < srv->lb_nodes_tot; node++) { + srv->lb_nodes[node].server = srv; + srv->lb_nodes[node].node.key = full_hash(srv->puid * SRV_EWGHT_RANGE + node); diff --git a/net/haproxy/patches/017-BUG-MAJOR-http-sample-use-a-static-buffer-for-raw---htx-conversion.patch b/net/haproxy/patches/017-BUG-MAJOR-http-sample-use-a-static-buffer-for-raw---htx-conversion.patch new file mode 100644 index 000000000..994bc37f5 --- /dev/null +++ b/net/haproxy/patches/017-BUG-MAJOR-http-sample-use-a-static-buffer-for-raw---htx-conversion.patch @@ -0,0 +1,71 @@ +commit c0968f59b723dfa9effa63ac28b59642b11c6b8b +Author: Richard Russo <russor@whatsapp.com> +Date: Wed Jul 31 11:45:56 2019 -0700 + + BUG/MAJOR: http/sample: use a static buffer for raw -> htx conversion + + Multiple calls to smp_fetch_fhdr use the header context to keep track of + header parsing position; however, when using header sampling on a raw + connection, the raw buffer is converted into an HTX structure each time, and + this was done in the trash areas; so the block reference would be invalid on + subsequent calls. + + This patch must be backported to 2.0 and 1.9. + + (cherry picked from commit 458eafb36df88932a02d1ce7ca31832abf11b8b3) + Signed-off-by: Christopher Faulet <cfaulet@haproxy.com> + +diff --git a/src/http_fetch.c b/src/http_fetch.c +index 67ea2094..e372a122 100644 +--- a/src/http_fetch.c ++++ b/src/http_fetch.c +@@ -46,10 +46,40 @@ + /* this struct is used between calls to smp_fetch_hdr() or smp_fetch_cookie() */ + static THREAD_LOCAL struct hdr_ctx static_hdr_ctx; + static THREAD_LOCAL struct http_hdr_ctx static_http_hdr_ctx; ++/* this is used to convert raw connection buffers to htx */ ++static THREAD_LOCAL struct buffer static_raw_htx_chunk; ++static THREAD_LOCAL char *static_raw_htx_buf; + + #define SMP_REQ_CHN(smp) (smp->strm ? &smp->strm->req : NULL) + #define SMP_RES_CHN(smp) (smp->strm ? &smp->strm->res : NULL) + ++/* This function returns the static htx chunk, where raw connections get ++ * converted to HTX as needed for samplxsing. 
++ */ ++struct buffer *get_raw_htx_chunk(void) ++{ ++ chunk_reset(&static_raw_htx_chunk); ++ return &static_raw_htx_chunk; ++} ++ ++static int alloc_raw_htx_chunk_per_thread() ++{ ++ static_raw_htx_buf = malloc(global.tune.bufsize); ++ if (!static_raw_htx_buf) ++ return 0; ++ chunk_init(&static_raw_htx_chunk, static_raw_htx_buf, global.tune.bufsize); ++ return 1; ++} ++ ++static void free_raw_htx_chunk_per_thread() ++{ ++ free(static_raw_htx_buf); ++ static_raw_htx_buf = NULL; ++} ++ ++REGISTER_PER_THREAD_ALLOC(alloc_raw_htx_chunk_per_thread); ++REGISTER_PER_THREAD_FREE(free_raw_htx_chunk_per_thread); ++ + /* + * Returns the data from Authorization header. Function may be called more + * than once so data is stored in txn->auth_data. When no header is found +@@ -265,7 +295,7 @@ struct htx *smp_prefetch_htx(struct sample *smp, struct channel *chn, int vol) + else if (h1m.flags & H1_MF_CLEN) + flags |= HTX_SL_F_CLEN; + +- htx = htx_from_buf(get_trash_chunk()); ++ htx = htx_from_buf(get_raw_htx_chunk()); + sl = htx_add_stline(htx, HTX_BLK_REQ_SL, flags, h1sl.rq.m, h1sl.rq.u, h1sl.rq.v); + if (!sl || !htx_add_all_headers(htx, hdrs)) + return NULL; diff --git a/net/haproxy/patches/018-BUG-MINOR-stream-int-also-update-analysers-timeouts-on-activity.patch b/net/haproxy/patches/018-BUG-MINOR-stream-int-also-update-analysers-timeouts-on-activity.patch new file mode 100644 index 000000000..118283b18 --- /dev/null +++ b/net/haproxy/patches/018-BUG-MINOR-stream-int-also-update-analysers-timeouts-on-activity.patch @@ -0,0 +1,46 @@ +commit 7343c710152c586a232a194ef37a56af636d6a56 +Author: Willy Tarreau <w@1wt.eu> +Date: Thu Aug 1 18:51:38 2019 +0200 + + BUG/MINOR: stream-int: also update analysers timeouts on activity + + Between 1.6 and 1.7, some parts of the stream forwarding process were + moved into lower layers and the stream-interface had to keep the + stream's task up to date regarding the timeouts. The analyser timeouts + were not updated there as it was believed this was not needed during + forwarding, but actually there is a case for this which is "option + contstats" which periodically triggers the analyser timeout, and this + change broke the option in case of sustained traffic (if there is some + I/O activity during the same millisecond as the timeout expires, then + the update will be missed). + + This patch simply brings back the analyser expiration updates from + process_stream() to stream_int_notify(). + + It may be backported as far as 1.7, taking care to adjust the fields + names if needed. + + (cherry picked from commit 45bcb37f0f8fa1e16dd9358a59dc280a38834dcd) + Signed-off-by: Willy Tarreau <w@1wt.eu> + +diff --git a/src/stream_interface.c b/src/stream_interface.c +index 9b9a8e9f..7d89cc90 100644 +--- a/src/stream_interface.c ++++ b/src/stream_interface.c +@@ -558,6 +558,16 @@ static void stream_int_notify(struct stream_interface *si) + task->expire = tick_first((tick_is_expired(task->expire, now_ms) ? 
0 : task->expire), + tick_first(tick_first(ic->rex, ic->wex), + tick_first(oc->rex, oc->wex))); ++ ++ task->expire = tick_first(task->expire, ic->analyse_exp); ++ task->expire = tick_first(task->expire, oc->analyse_exp); ++ ++ if (si->exp) ++ task->expire = tick_first(task->expire, si->exp); ++ ++ if (sio->exp) ++ task->expire = tick_first(task->expire, sio->exp); ++ + task_queue(task); + } + if (ic->flags & CF_READ_ACTIVITY) diff --git a/net/haproxy/patches/019-BUG-MEDIUM-mux-h2-unbreak-receipt-of-large-DATA-frames.patch b/net/haproxy/patches/019-BUG-MEDIUM-mux-h2-unbreak-receipt-of-large-DATA-frames.patch new file mode 100644 index 000000000..579d358fa --- /dev/null +++ b/net/haproxy/patches/019-BUG-MEDIUM-mux-h2-unbreak-receipt-of-large-DATA-frames.patch @@ -0,0 +1,37 @@ +commit a8fcdacb8cc0dddec72b1ddc4d9afc92d3684acd +Author: Willy Tarreau <w@1wt.eu> +Date: Fri Aug 2 07:48:47 2019 +0200 + + BUG/MEDIUM: mux-h2: unbreak receipt of large DATA frames + + Recent optimization in commit 4d7a88482 ("MEDIUM: mux-h2: don't try to + read more than needed") broke the receipt of large DATA frames because + it would unconditionally subscribe if there was some room left, thus + preventing any new rx from being done since subscription may only be + done once the end was reached, as indicated by ret == 0. + + However, fixing this uncovered that in HTX mode previous versions might + occasionally be affected as well, when an available frame is the same + size as the maximum data that may fit into an HTX buffer, we may end + up reading that whole frame and still subscribe since it's still allowed + to receive, thus causing issues to read the next frame. + + This patch will only work for 2.1-dev but a minor adaptation will be + needed for earlier versions (down to 1.9, where subscribe() was added). + + (cherry picked from commit 9bc1c95855b9c6300de5ecf3720cbe4b2558c5a1) + Signed-off-by: Willy Tarreau <w@1wt.eu> + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index 5bb85181..d605fe94 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -2766,7 +2766,7 @@ static int h2_recv(struct h2c *h2c) + ret = 0; + } while (ret > 0); + +- if (h2_recv_allowed(h2c) && (b_data(buf) < buf->size)) ++ if (max && !ret && h2_recv_allowed(h2c)) + conn->xprt->subscribe(conn, conn->xprt_ctx, SUB_RETRY_RECV, &h2c->wait_event); + + if (!b_data(buf)) { diff --git a/net/haproxy/patches/020-BUG-MEDIUM-mux-h2-split-the-streams-and-connections-window-sizes.patch b/net/haproxy/patches/020-BUG-MEDIUM-mux-h2-split-the-streams-and-connections-window-sizes.patch new file mode 100644 index 000000000..acde533f4 --- /dev/null +++ b/net/haproxy/patches/020-BUG-MEDIUM-mux-h2-split-the-streams-and-connections-window-sizes.patch @@ -0,0 +1,227 @@ +commit 5a9c875f0f1ee83bd5889dd1ad53e9da43e6c34e +Author: Willy Tarreau <w@1wt.eu> +Date: Fri Aug 2 07:52:08 2019 +0200 + + BUG/MEDIUM: mux-h2: split the stream's and connection's window sizes + + The SETTINGS frame parser updates all streams' window for each + INITIAL_WINDOW_SIZE setting received on the connection (like h2spec + does in test 6.5.3), which can start to be expensive if repeated when + there are many streams (up to 100 by default). A quick test shows that + it's possible to parse only 35000 settings per second on a 3 GHz core + for 100 streams, which is rather small. + + Given that window sizes are relative and may be negative, there's no + point in pre-initializing them for each stream and update them from + the settings. 
Instead, let's make them relative to the connection's + initial window size so that any change immediately affects all streams. + The only thing that remains needed is to wake up the streams that were + unblocked by the update, which is now done once at the end of + h2_process_demux() instead of once per setting. This now results in + 5.7 million settings being processed per second, which is way better. + + In order to keep the change small, the h2s' mws field was renamed to + "sws" for "stream window size", and an h2s_mws() function was added + to add it to the connection's initial window setting and determine the + window size to use when muxing. The h2c_update_all_ws() function was + renamed to h2c_unblock_sfctl() since it's now only used to unblock + previously blocked streams. + + This needs to be backported to all versions till 1.8. + + (cherry picked from commit 1d4a0f88100daeb17dd0c9470c659b1ec288bc07) + [wt: context adjustment, port to legacy parts] + Signed-off-by: Willy Tarreau <w@1wt.eu> + +diff --git a/src/mux_h2.c b/src/mux_h2.c +index d605fe94..f90e9435 100644 +--- a/src/mux_h2.c ++++ b/src/mux_h2.c +@@ -208,7 +208,7 @@ struct h2s { + struct eb32_node by_id; /* place in h2c's streams_by_id */ + int32_t id; /* stream ID */ + uint32_t flags; /* H2_SF_* */ +- int mws; /* mux window size for this stream */ ++ int sws; /* stream window size, to be added to the mux's initial window size */ + enum h2_err errcode; /* H2 err code (H2_ERR_*) */ + enum h2_ss st; + uint16_t status; /* HTTP response status */ +@@ -707,6 +707,14 @@ static inline __maybe_unused int h2s_id(const struct h2s *h2s) + return h2s ? h2s->id : 0; + } + ++/* returns the sum of the stream's own window size and the mux's initial ++ * window, which together form the stream's effective window size. ++ */ ++static inline int h2s_mws(const struct h2s *h2s) ++{ ++ return h2s->sws + h2s->h2c->miw; ++} ++ + /* returns true of the mux is currently busy as seen from stream <h2s> */ + static inline __maybe_unused int h2c_mux_busy(const struct h2c *h2c, const struct h2s *h2s) + { +@@ -945,7 +953,7 @@ static struct h2s *h2s_new(struct h2c *h2c, int id) + LIST_INIT(&h2s->sending_list); + h2s->h2c = h2c; + h2s->cs = NULL; +- h2s->mws = h2c->miw; ++ h2s->sws = 0; + h2s->flags = H2_SF_NONE; + h2s->errcode = H2_ERR_NO_ERROR; + h2s->st = H2_SS_IDLE; +@@ -1543,30 +1551,23 @@ static void h2_wake_some_streams(struct h2c *h2c, int last) + } + } + +-/* Increase all streams' outgoing window size by the difference passed in +- * argument. This is needed upon receipt of the settings frame if the initial +- * window size is different. The difference may be negative and the resulting +- * window size as well, for the time it takes to receive some window updates. ++/* Wake up all blocked streams whose window size has become positive after the ++ * mux's initial window was adjusted. This should be done after having processed ++ * SETTINGS frames which have updated the mux's initial window size. 
+ */ +-static void h2c_update_all_ws(struct h2c *h2c, int diff) ++static void h2c_unblock_sfctl(struct h2c *h2c) + { + struct h2s *h2s; + struct eb32_node *node; + +- if (!diff) +- return; +- + node = eb32_first(&h2c->streams_by_id); + while (node) { + h2s = container_of(node, struct h2s, by_id); +- h2s->mws += diff; +- +- if (h2s->mws > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) { ++ if (h2s->flags & H2_SF_BLK_SFCTL && h2s_mws(h2s) > 0) { + h2s->flags &= ~H2_SF_BLK_SFCTL; + if (h2s->send_wait && !LIST_ADDED(&h2s->list)) + LIST_ADDQ(&h2c->send_list, &h2s->list); + } +- + node = eb32_next(node); + } + } +@@ -1607,7 +1608,6 @@ static int h2c_handle_settings(struct h2c *h2c) + error = H2_ERR_FLOW_CONTROL_ERROR; + goto fail; + } +- h2c_update_all_ws(h2c, arg - h2c->miw); + h2c->miw = arg; + break; + case H2_SETTINGS_MAX_FRAME_SIZE: +@@ -1869,13 +1869,13 @@ static int h2c_handle_window_update(struct h2c *h2c, struct h2s *h2s) + goto strm_err; + } + +- if (h2s->mws >= 0 && h2s->mws + inc < 0) { ++ if (h2s_mws(h2s) >= 0 && h2s_mws(h2s) + inc < 0) { + error = H2_ERR_FLOW_CONTROL_ERROR; + goto strm_err; + } + +- h2s->mws += inc; +- if (h2s->mws > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) { ++ h2s->sws += inc; ++ if (h2s_mws(h2s) > 0 && (h2s->flags & H2_SF_BLK_SFCTL)) { + h2s->flags &= ~H2_SF_BLK_SFCTL; + if (h2s->send_wait && !LIST_ADDED(&h2s->list)) + LIST_ADDQ(&h2c->send_list, &h2s->list); +@@ -2237,6 +2237,7 @@ static void h2_process_demux(struct h2c *h2c) + struct h2s *h2s = NULL, *tmp_h2s; + struct h2_fh hdr; + unsigned int padlen = 0; ++ int32_t old_iw = h2c->miw; + + if (h2c->st0 >= H2_CS_ERROR) + return; +@@ -2625,6 +2626,9 @@ static void h2_process_demux(struct h2c *h2c) + h2s_notify_recv(h2s); + } + ++ if (old_iw != h2c->miw) ++ h2c_unblock_sfctl(h2c); ++ + h2c_restart_reading(h2c, 0); + } + +@@ -4259,8 +4263,8 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf, + if (size > max) + size = max; + +- if (size > h2s->mws) +- size = h2s->mws; ++ if (size > h2s_mws(h2s)) ++ size = h2s_mws(h2s); + + if (size <= 0) { + h2s->flags |= H2_SF_BLK_SFCTL; +@@ -4362,7 +4366,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf, + ofs += size; + total += size; + h1m->curr_len -= size; +- h2s->mws -= size; ++ h2s->sws -= size; + h2c->mws -= size; + + if (size && !h1m->curr_len && (h1m->flags & H1_MF_CHNK)) { +@@ -4390,7 +4394,7 @@ static size_t h2s_frt_make_resp_data(struct h2s *h2s, const struct buffer *buf, + } + + end: +- trace("[%d] sent simple H2 DATA response (sid=%d) = %d bytes out (%u in, st=%s, ep=%u, es=%s, h2cws=%d h2sws=%d) data=%u", h2c->st0, h2s->id, size+9, (unsigned int)total, h1m_state_str(h1m->state), h1m->err_pos, h1m_state_str(h1m->err_state), h2c->mws, h2s->mws, (unsigned int)b_data(buf)); ++ trace("[%d] sent simple H2 DATA response (sid=%d) = %d bytes out (%u in, st=%s, ep=%u, es=%s, h2cws=%d h2sws=%d) data=%u", h2c->st0, h2s->id, size+9, (unsigned int)total, h1m_state_str(h1m->state), h1m->err_pos, h1m_state_str(h1m->err_state), h2c->mws, h2s_mws(h2s), (unsigned int)b_data(buf)); + return total; + } + +@@ -4937,7 +4941,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si + */ + if (unlikely(fsize == count && + htx->used == 1 && type == HTX_BLK_DATA && +- fsize <= h2s->mws && fsize <= h2c->mws && fsize <= h2c->mfs)) { ++ fsize <= h2s_mws(h2s) && fsize <= h2c->mws && fsize <= h2c->mfs)) { + void *old_area = mbuf->area; + + if (b_data(mbuf)) { +@@ -4972,7 +4976,7 @@ static size_t 
h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si + h2_set_frame_size(outbuf.area, fsize); + + /* update windows */ +- h2s->mws -= fsize; ++ h2s->sws -= fsize; + h2c->mws -= fsize; + + /* and exchange with our old area */ +@@ -5024,7 +5028,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si + if (!fsize) + goto send_empty; + +- if (h2s->mws <= 0) { ++ if (h2s_mws(h2s) <= 0) { + h2s->flags |= H2_SF_BLK_SFCTL; + if (LIST_ADDED(&h2s->list)) + LIST_DEL_INIT(&h2s->list); +@@ -5034,8 +5038,8 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si + if (fsize > count) + fsize = count; + +- if (fsize > h2s->mws) +- fsize = h2s->mws; // >0 ++ if (fsize > h2s_mws(h2s)) ++ fsize = h2s_mws(h2s); // >0 + + if (h2c->mfs && fsize > h2c->mfs) + fsize = h2c->mfs; // >0 +@@ -5071,7 +5075,7 @@ static size_t h2s_htx_frt_make_resp_data(struct h2s *h2s, struct buffer *buf, si + + /* now let's copy this this into the output buffer */ + memcpy(outbuf.area + 9, htx_get_blk_ptr(htx, blk), fsize); +- h2s->mws -= fsize; ++ h2s->sws -= fsize; + h2c->mws -= fsize; + count -= fsize; + diff --git a/net/haproxy/patches/000-OPENWRT-add-uclibc-support.patch b/net/haproxy/patches/021-OPENWRT-add-uclibc-support.patch index 275702522..275702522 100644 --- a/net/haproxy/patches/000-OPENWRT-add-uclibc-support.patch +++ b/net/haproxy/patches/021-OPENWRT-add-uclibc-support.patch diff --git a/net/haproxy/patches/001-OPENWRT-openssl-deprecated.patch b/net/haproxy/patches/022-OPENWRT-openssl-deprecated.patch index 541077e23..541077e23 100644 --- a/net/haproxy/patches/001-OPENWRT-openssl-deprecated.patch +++ b/net/haproxy/patches/022-OPENWRT-openssl-deprecated.patch diff --git a/net/https-dns-proxy/Makefile b/net/https-dns-proxy/Makefile index 6e9f8e1cd..27660ca6d 100644 --- a/net/https-dns-proxy/Makefile +++ b/net/https-dns-proxy/Makefile @@ -16,6 +16,8 @@ PKG_LICENSE:=MIT include $(INCLUDE_DIR)/package.mk include $(INCLUDE_DIR)/cmake.mk +CMAKE_OPTIONS += -DCLANG_TIDY_EXE= + define Package/https_dns_proxy SECTION:=net CATEGORY:=Network diff --git a/net/i2pd/Makefile b/net/i2pd/Makefile index 20708c027..4775365d1 100644 --- a/net/i2pd/Makefile +++ b/net/i2pd/Makefile @@ -9,13 +9,13 @@ include $(TOPDIR)/rules.mk PKG_NAME:=i2pd -PKG_VERSION:=2.24.0 +PKG_VERSION:=2.26.0 PKG_RELEASE:=1 PKG_BUILD_PARALLEL:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/PurpleI2P/i2pd/tar.gz/$(PKG_VERSION)? -PKG_HASH:=809b37100f0f176432b01ab6edee96dc62b0f65d5bf7531e008a87117e742566 +PKG_HASH:=2ae18978c8796bb6b45bc8cfe4e1f25377e0cfc9fcf9f46054b09dc3384eef63 PKG_MAINTAINER:=David Yang <mmyangfl@gmail.com> PKG_LICENSE:=BSD-3-Clause diff --git a/net/i2pd/files/i2pd.init b/net/i2pd/files/i2pd.init index 4af101363..a40a74205 100755 --- a/net/i2pd/files/i2pd.init +++ b/net/i2pd/files/i2pd.init @@ -18,14 +18,13 @@ CONFFILE=/etc/i2pd/i2pd.conf -start_service() { +i2pd_start() { + local cfg="$1" local data_dir local addressbook_dir - config_load i2pd - - config_get data_dir i2pd data_dir - config_get addressbook_dir i2pd addressbook_dir + config_get data_dir "$cfg" data_dir + config_get addressbook_dir "$cfg" addressbook_dir ## Setting up data dir if [ ! 
-d "$data_dir" ]; then @@ -57,3 +56,28 @@ start_service() { procd_set_param pidfile "$PIDFILE" procd_close_instance } + + +start_service() { + local instance="$1" + local instance_found=0 + + config_cb() { + local type="$1" + local name="$2" + if [ "$type" = "i2pd" ]; then + if [ -n "$instance" ] && [ "$instance" = "$name" ]; then + instance_found=1 + fi + fi + } + + config_load i2pd + + if [ -n "$instance" ]; then + [ "$instance_found" -gt 0 ] || return + i2pd_start "$instance" + else + config_foreach i2pd_start i2pd + fi +} diff --git a/net/ipvsadm/Makefile b/net/ipvsadm/Makefile index 88c78b302..f591ebdc4 100644 --- a/net/ipvsadm/Makefile +++ b/net/ipvsadm/Makefile @@ -9,15 +9,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=ipvsadm -PKG_VERSION:=1.29 +PKG_VERSION:=1.30 PKG_MAINTAINER:=Mauro Mozzarelli <mauro@ezplanet.org>, \ Florian Eckert <fe@dev.tdt.de> -PKG_LICENSE:=GPL-2.0+ +PKG_LICENSE:=GPL-2.0-or-later PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz -PKG_SOURCE_URL:=https://www.kernel.org/pub/linux/utils/kernel/ipvsadm/ -PKG_HASH:=c3de4a21d90a02c621f0c72ee36a7aa27374b6f29fd4178f33fbf71b4c66c149 +PKG_SOURCE_URL:=@KERNEL/linux/utils/kernel/ipvsadm/ +PKG_HASH:=95573d70df473c9f63fc4ac496c044c69e3a6de7ccac119922210c0b44cd7a0c PKG_BUILD_PARALLEL:=1 PKG_INSTALL:=1 @@ -29,7 +29,7 @@ define Package/ipvsadm CATEGORY:=Network TITLE:=IP Virtual Server Configuration Manager URL:=http://www.linuxvirtualserver.org - DEPENDS:= +kmod-nf-ipvs +libnl-tiny +libpopt + DEPENDS:= +kmod-nf-ipvs +libnl-genl +libpopt endef define Package/ipvsadm/description @@ -40,17 +40,6 @@ define Package/ipvsadm/description network services based on a cluster of two or more nodes. endef -TARGET_CFLAGS += \ - -D_GNU_SOURCE \ - -I$(STAGING_DIR)/usr/include/libnl-tiny - -define Build/Compile - CFLAGS="$(TARGET_CFLAGS)" \ - $(MAKE) -C $(PKG_BUILD_DIR) \ - CC="$(TARGET_CC)" \ - LIBS="$(TARGET_LDFLAGS) -lnl-tiny -lpopt" -endef - define Package/ipvsadm/install $(INSTALL_DIR) $(1)/sbin $(INSTALL_BIN) $(PKG_BUILD_DIR)/ipvsadm $(1)/sbin/ diff --git a/net/kcptun/Makefile b/net/kcptun/Makefile new file mode 100644 index 000000000..b39a11271 --- /dev/null +++ b/net/kcptun/Makefile @@ -0,0 +1,74 @@ +include $(TOPDIR)/rules.mk + +PKG_NAME:=kcptun +PKG_VERSION:=20190725 +PKG_RELEASE:=1 + +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_SOURCE_URL:=https://codeload.github.com/xtaci/kcptun/tar.gz/v${PKG_VERSION}? 
+PKG_HASH:=65c0d0d4f7e3bb3c3b91e23ff2eb6621455d6d376a4f17e6fb2017337ce711c1 + +PKG_MAINTAINER:=Dengfeng Liu <liudf0716@gmail.com> +PKG_LICENSE:=MIT +PKG_LICENSE_FILES:=LICENSE + +PKG_BUILD_DEPENDS:=golang/host +PKG_BUILD_PARALLEL:=1 +PKG_USE_MIPS16:=0 + +GO_PKG:=github.com/xtaci/kcptun + +include $(INCLUDE_DIR)/package.mk +include ../../lang/golang/golang-package.mk + +define Package/kcptun/template + SECTION:=net + CATEGORY:=Network + SUBMENU:=Web Servers/Proxies + TITLE:=KCP-based Secure Tunnel + URL:=https://github.com/xtaci/kcptun + DEPENDS:=$(GO_ARCH_DEPENDS) +endef + +define Package/kcptun-c + $(call Package/kcptun/template) + TITLE+= (client) +endef + +define Package/kcptun-s + $(call Package/kcptun/template) + TITLE+= (server) +endef + +define Package/kcptun/description + kcptun is a Stable & Secure Tunnel Based On KCP with N:M Multiplexing +endef +Package/kcptun-c/description = $(Package/kcptun/description) +Package/kcptun-s/description = $(Package/kcptun/description) + +GO_PKG_LDFLAGS_X:=main.VERSION=$(PKG_VERSION) +GO_PKG_LDFLAGS:=-s -w + +define Package/kcptun/install + $(call GoPackage/Package/Install/Bin,$(PKG_INSTALL_DIR)) + + $(INSTALL_DIR) $(1)/usr/bin/ + $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/$(2) $(1)/usr/bin/$(3) + $(INSTALL_DIR) $(1)/etc/config/ + $(INSTALL_CONF) ./files/$(3).conf $(1)/etc/config/$(3) + $(INSTALL_DIR) $(1)/etc/init.d/ + $(INSTALL_BIN) ./files/$(3).init $(1)/etc/init.d/$(3) +endef + +define Package/kcptun-c/install + $(call Package/kcptun/install,$(1),client,kcptun-c) +endef + +define Package/kcptun-s/install + $(call Package/kcptun/install,$(1),server,kcptun-s) +endef + +$(eval $(call GoBinPackage,kcptun-c)) +$(eval $(call BuildPackage,kcptun-c)) +$(eval $(call GoBinPackage,kcptun-s)) +$(eval $(call BuildPackage,kcptun-s)) diff --git a/net/kcptun/files/kcptun-c.conf b/net/kcptun/files/kcptun-c.conf new file mode 100644 index 000000000..adef9e3ea --- /dev/null +++ b/net/kcptun/files/kcptun-c.conf @@ -0,0 +1,9 @@ +config kcptun + option local_port 12948 # this port should be your service port + option remote_ip 'your vps ip' + option remote_port 29900 + option mode 'fast' + option nocomp 1 + option sndwnd 128 + option rcvwnd 512 + option disabled 1 # set 0 to enable it diff --git a/net/kcptun/files/kcptun-c.init b/net/kcptun/files/kcptun-c.init new file mode 100644 index 000000000..653d8d817 --- /dev/null +++ b/net/kcptun/files/kcptun-c.init @@ -0,0 +1,56 @@ +#!/bin/sh /etc/rc.common +# Copyright (C) 2019 Dengfeng Liu + +START=99 + +USE_PROCD=1 +NAME=kcptun-c +PROG=/usr/bin/${NAME} + +validate_section_kcptun() +{ + uci_load_validate "${NAME}" kcptun "$1" "$2" \ + 'local_port:port' \ + 'remote_ip:string' \ + 'remote_port:port' \ + 'mode:string' \ + 'nocomp:bool' \ + 'sndwnd:uinteger' \ + 'rcvwnd:uinteger' \ + 'disabled:bool' +} + +kcptun_instance() +{ + [ "$2" = 0 ] || { + echo "validation failed" + return 1 + } + + [ "${disabled}" = "1" ] && return 1 + + [ "${local_port}" -gt 0 ] && [ "${local_port}" -lt 65536 ] || return 1 + + [ "${remote_port}" -gt 0 ] && [ "${remote_port}" -lt 65536 ] || return 1 + + [ -n "${remote_ip}" ] || { + return 1 + } + + procd_open_instance + procd_set_param command "${PROG}" + procd_append_param command --localaddr ":${local_port}" + procd_append_param command --remoteaddr "${remote_ip}:${remote_port}" + [ -n "${mode}" ] && procd_append_param command --mode "${mode}" + [ "${nocomp}" -eq 1 ] && procd_append_param command --nocomp + [ "${sndwnd}" -gt 0 ] && procd_append_param command --sndwnd "${sndwnd}" + [ "${rcvwnd}" -gt 
0 ] && procd_append_param command --rcvwnd "${rcvwnd}" + procd_set_param respawn + procd_close_instance +} + +start_service() +{ + config_load "${NAME}" + config_foreach validate_section_kcptun kcptun kcptun_instance +} diff --git a/net/kcptun/files/kcptun-s.conf b/net/kcptun/files/kcptun-s.conf new file mode 100644 index 000000000..3537f64b5 --- /dev/null +++ b/net/kcptun/files/kcptun-s.conf @@ -0,0 +1,9 @@ +config kcptun + option local_port 29900 + option target_ip '127.0.0.1' + option target_port 12948 # this port should be your service port + option mode 'fast' + option nocomp 1 + option sndwnd 1024 + option rcvwnd 1024 + option disabled 1 # set 0 to enable it diff --git a/net/kcptun/files/kcptun-s.init b/net/kcptun/files/kcptun-s.init new file mode 100644 index 000000000..cfcb81add --- /dev/null +++ b/net/kcptun/files/kcptun-s.init @@ -0,0 +1,56 @@ +#!/bin/sh /etc/rc.common +# Copyright (C) 2019 Dengfeng Liu + +START=99 + +USE_PROCD=1 +NAME=kcptun-s +PROG=/usr/bin/${NAME} + +validate_section_kcptun() +{ + uci_load_validate "${NAME}" kcptun "$1" "$2" \ + 'local_port:port' \ + 'target_ip:string' \ + 'target_port:port' \ + 'mode:string' \ + 'nocomp:bool' \ + 'sndwnd:uinteger' \ + 'rcvwnd:uinteger' \ + 'disabled:bool' +} + +kcptun_instance() +{ + [ "$2" = 0 ] || { + echo "validation failed" + return 1 + } + + [ "${disabled}" = "1" ] && return 1 + + [ "${local_port}" -gt 0 ] && [ "${local_port}" -lt 65536 ] || return 1 + + [ "${target_port}" -gt 0 ] && [ "${target_port}" -lt 65536 ] || return 1 + + [ -n "${target_ip}" ] || { + return 1 + } + + procd_open_instance + procd_set_param command "${PROG}" + procd_append_param command --listen ":${local_port}" + procd_append_param command --target "${target_ip}:${target_port}" + [ -n "${mode}" ] && procd_append_param command --mode "${mode}" + [ "${nocomp}" -eq 1 ] && procd_append_param command --nocomp + [ "${sndwnd}" -gt 0 ] && procd_append_param command --sndwnd "${sndwnd}" + [ "${rcvwnd}" -gt 0 ] && procd_append_param command --rcvwnd "${rcvwnd}" + procd_set_param respawn + procd_close_instance +} + +start_service() +{ + config_load "${NAME}" + config_foreach validate_section_kcptun kcptun kcptun_instance +} diff --git a/net/keepalived/Makefile b/net/keepalived/Makefile index 24da2bc24..9310b3078 100644 --- a/net/keepalived/Makefile +++ b/net/keepalived/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=keepalived -PKG_VERSION:=2.0.16 +PKG_VERSION:=2.0.18 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://www.keepalived.org/software -PKG_HASH:=f0c7dc86147a286913c1c2c918f557735016285d25779d4d2fce5732fcb888df +PKG_HASH:=1423a2b1b8e541211029b9e1e1452e683bbe5f4b0b287eddd609aaf5ff024fd0 PKG_CPE_ID:=cpe:/a:keepalived:keepalived PKG_LICENSE:=GPL-2.0+ diff --git a/net/keepalived/files/keepalived.init b/net/keepalived/files/keepalived.init index 4e63711f4..cf425e872 100644 --- a/net/keepalived/files/keepalived.init +++ b/net/keepalived/files/keepalived.init @@ -8,104 +8,106 @@ USE_PROCD=1 KEEPALIVED_CONF=/tmp/keepalived.conf -INDENT_1=\\t -INDENT_2=$INDENT_1$INDENT_1 -INDENT_3=$INDENT_1$INDENT_1$INDENT_1 -INDENT_4=$INDENT_1$INDENT_1$INDENT_1$INDENT_1 +INDENT_1="\t" +INDENT_2="${INDENT_1}${INDENT_1}" +INDENT_3="${INDENT_1}${INDENT_1}${INDENT_1}" +INDENT_4="${INDENT_1}${INDENT_1}${INDENT_1}${INDENT_1}" config_section_open() { - local tag=$1 - local name=$2 + local tag="$1" + local name="$2" - printf "$tag" >> $KEEPALIVED_CONF - [ -n "$name" ] && printf " $name" >> $KEEPALIVED_CONF - printf " {\n" >> 
$KEEPALIVED_CONF + printf '%s' "$tag" >> "$KEEPALIVED_CONF" + [ -n "$name" ] && printf ' %s' "$name" >> "$KEEPALIVED_CONF" + printf ' {\n' >> "$KEEPALIVED_CONF" } config_section_close() { - printf "}\n\n" >> $KEEPALIVED_CONF + printf '}\n\n' >> "$KEEPALIVED_CONF" } config_foreach_wrapper() { - local section=$1 - local function=$1 + local section="$1" + local function="$1" # Convention is that 'function' and 'section' are the same - config_foreach $function $section + config_foreach "$function" "$section" } print_elems_indent() { - local config=$1 + local config="$1" shift - local indent=$1 + local indent="$1" shift + [ -z "$indent" ] && indent="$INDENT_1" - for opt in $*; do - local $opt + for opt in "$@"; do + local "$opt" + local optval local no_val=0 - if [ ${opt:0:7} == "no_val_" ]; then - opt=${opt:7} + if [ "${opt:0:7}" = "no_val_" ]; then + opt="${opt:7}" no_val=1 fi - config_get $opt $config $opt - eval optval=\$$opt + config_get "$opt" "$config" "$opt" + eval optval=\$"$opt" [ -z "$optval" ] && continue - printf "$indent$opt" >> $KEEPALIVED_CONF - [ "$no_val" == "0" ] && { - local words=$(echo "$optval" | wc -w) - if [ $words -gt 1 ]; then - printf " \"$optval\"" >> $KEEPALIVED_CONF + printf '%b%s' "$indent" "$opt" >> "$KEEPALIVED_CONF" + [ "$no_val" = "0" ] && { + local words=0 + words="$(echo "$optval" | wc -w)" + if [ "$words" -gt 1 ]; then + printf ' "%s"' "$optval" >> "$KEEPALIVED_CONF" else - printf " $optval" >> $KEEPALIVED_CONF + printf ' %s' "$optval" >> "$KEEPALIVED_CONF" fi } - printf "\n" >> $KEEPALIVED_CONF + printf '\n' >> "$KEEPALIVED_CONF" done unset optval } print_list_indent() { - local lst=$1 - local indent=$2 + local lst="$1" + local indent="$2" local lst_elems - [ -z "$indent" ] && indent=$INDENT_1 + [ -z "$indent" ] && indent="$INDENT_1" - eval lst_elems=\$$lst + eval lst_elems=\$"$lst" [ -z "$lst_elems" ] && return 0 - printf "$indent$lst {\n" >> $KEEPALIVED_CONF + printf '%b%s {\n' "$indent" "$lst" >> "$KEEPALIVED_CONF" for e in $lst_elems; do - [ -n "$eval_item_func" ] - printf "$indent$INDENT_1$e\n" >> $KEEPALIVED_CONF + printf '%b%s\n' "${indent}${INDENT_1}" "$e">> "$KEEPALIVED_CONF" done - printf "$indent}\n" >> $KEEPALIVED_CONF + printf '%b}\n' "$indent" >> "$KEEPALIVED_CONF" } print_notify() { - local type=$1 + local type="$1" shift - local name=$1 + local name="$1" shift - for notify in $*; do - printf "$INDENT_1$notify" >> $KEEPALIVED_CONF - notify=$(echo $notify | tr 'a-z' 'A-Z') - printf " \"/bin/busybox env -i ACTION=$notify TYPE=$type NAME=$name /sbin/hotplug-call keepalived\"\n" >> $KEEPALIVED_CONF + for notify in "$@"; do + printf '%b%s' "${INDENT_1}" "$notify">> "$KEEPALIVED_CONF" + notify="$(echo "$notify" | tr 'a-z' 'A-Z')" + printf ' "/bin/busybox env -i ACTION=%s TYPE=%s NAME=%s /sbin/hotplug-call keepalived"\n' "$notify" "$type" "$name" >> "$KEEPALIVED_CONF" done } global_defs() { local linkbeat_use_polling notification_email - config_get alt_config_file $1 alt_config_file + config_get alt_config_file "$1" alt_config_file [ -z "$alt_config_file" ] || return 0 - config_get_bool linkbeat_use_polling $1 linkbeat_use_polling 0 - [ $linkbeat_use_polling -gt 0 ] && printf "linkbeat_use_polling\n\n" >> $KEEPALIVED_CONF + config_get_bool linkbeat_use_polling "$1" linkbeat_use_polling 0 + [ "$linkbeat_use_polling" -gt 0 ] && printf 'linkbeat_use_polling\n\n' >> "$KEEPALIVED_CONF" - config_get notification_email $1 notification_email + config_get notification_email "$1" notification_email print_list_indent notification_email - print_elems_indent 
$1 $INDENT_1 \ + print_elems_indent "$1" "$INDENT_1" \ notification_email_from \ smtp_server \ smtp_connect_timeout \ @@ -116,129 +118,129 @@ global_defs() { } print_ipaddress_indent() { - local section=$1 - local curr_ipaddr=$2 - local indent=$3 + local section="$1" + local curr_ipaddr="$2" + local indent="$3" local address device scope name - config_get name $section name + config_get name "$section" name [ "$name" != "$curr_ipaddr" ] && return 0 - config_get address $section address - config_get device $section device - config_get scope $section scope + config_get address "$section" address + config_get device "$section" device + config_get scope "$section" scope # Default indent - [ -z "$indent" ] && indent=$INDENT_1 + [ -z "$indent" ] && indent="$INDENT_1" # If no address exit [ -z "$address" ] && return 0 if [ -z "$device" ]; then - printf "$indent$address" >> $KEEPALIVED_CONF + printf '%b%s' "$indent" "$address" >> "$KEEPALIVED_CONF" else # Add IP address/netmask and device - printf "$indent$address dev $device" >> $KEEPALIVED_CONF + printf '%b%s dev %s' "$indent" "$address" "$device">> "$KEEPALIVED_CONF" # Add scope - [ -n "$scope" ] && printf " scope $scope" >> $KEEPALIVED_CONF + [ -n "$scope" ] && printf ' scope %s' "$scope" >> "$KEEPALIVED_CONF" fi - printf "\n" >> $KEEPALIVED_CONF + printf '\n' >> "$KEEPALIVED_CONF" } static_ipaddress() { local address config_get address "$1" address for a in $address; do - config_foreach print_ipaddress_indent ipaddress $a + config_foreach print_ipaddress_indent ipaddress "$a" done } print_route_indent() { - local section=$1 - local curr_route=$2 - local indent=$3 + local section="$1" + local curr_route="$2" + local indent="$3" local name blackhole address src_addr gateway device scope table - config_get name $section name + config_get name "$section" name [ "$name" != "$curr_route" ] && return 0 - config_get_bool blackhole $section blackhole 0 - config_get address $section address - config_get src_addr $section src_addr - config_get gateway $section gateway - config_get device $section device - config_get table $section table + config_get_bool blackhole "$section" blackhole 0 + config_get address "$section" address + config_get src_addr "$section" src_addr + config_get gateway "$section" gateway + config_get device "$section" device + config_get table "$section" table # If no address exit [ -z "$address" ] && return 0 # Default indent - [ -z "$indent" ] && indent=$INDENT_1 + [ -z "$indent" ] && indent="$INDENT_1" - [ $blackhole -gt 0 ] && { - printf "${indent}blackhole $address\n" >> $KEEPALIVED_CONF + [ "$blackhole" -gt 0 ] && { + printf '%bblackhole %s\n' "$indent" "$address" >> "$KEEPALIVED_CONF" return 0 } # Add src addr or address if [ -n "$src_addr" ]; then - printf "${indent}src $src_addr $address" >> $KEEPALIVED_CONF + printf '%bsrc %s %s' "$indent" "$src_addr" "$address" >> "$KEEPALIVED_CONF" else [ -z "$device" ] && return 0 - printf "$indent$address" >> $KEEPALIVED_CONF + printf '%b%s' "$indent" "$address" >> "$KEEPALIVED_CONF" fi # Add route/gateway - [ -n "$gateway" ] && printf " via $gateway" >> $KEEPALIVED_CONF + [ -n "$gateway" ] && printf ' via %s' "$gateway" >> "$KEEPALIVED_CONF" # Add device - printf " dev $device" >> $KEEPALIVED_CONF + printf ' dev %s' "$device" >> "$KEEPALIVED_CONF" # Add scope - [ -n "$scope" ] && printf " scope $scope" >> $KEEPALIVED_CONF + [ -n "$scope" ] && printf ' scope %s' "$scope" >> "$KEEPALIVED_CONF" # Add table - [ -n "$table" ] && printf " table $table" >> $KEEPALIVED_CONF - printf "\n" >> 
$KEEPALIVED_CONF + [ -n "$table" ] && printf ' table %s' "$table" >> "$KEEPALIVED_CONF" + printf '\n' >> "$KEEPALIVED_CONF" } print_track_elem_indent() { - local section=$1 - local curr_track_elem=$2 - local indent=$3 + local section="$1" + local curr_track_elem="$2" + local indent="$3" - local script name value - config_get name $section name + local name value + config_get name "$section" name [ "$name" != "$curr_track_elem" ] && return 0 - config_get value $section value - config_get weight $section weight + config_get value "$section" value + config_get weight "$section" weight [ -z "$value" ] && return 0 - printf "$indent$value" >> $KEEPALIVED_CONF - [ -n "$weight" ] && printf " weight $weight" >> $KEEPALIVED_CONF - printf "\n" >> $KEEPALIVED_CONF + printf '%b%s' "$indent" "$value" >> "$KEEPALIVED_CONF" + [ -n "$weight" ] && printf ' weight %s' "$weight" >> "$KEEPALIVED_CONF" + printf '\n' >> "$KEEPALIVED_CONF" } static_routes() { local route config_get route "$1" route for r in $route; do - config_foreach print_route_indent route $r + config_foreach print_route_indent route "$r" done } # Count 'vrrp_instance' with the given name ; called by vrrp_instance_check() vrrp_instance_name_count() { local name - config_get name $1 name - [ "$name" == "$2" ] && count=$((count + 1)) + config_get name "$1" name + [ "$name" = "$2" ] && count="$((count + 1))" } # Check if there's a 'vrrp_instance' section with the given name vrrp_instance_check() { - local count=0 - local name=$1 - config_foreach vrrp_instance_name_count vrrp_instance $name + local count="0" + local name="$1" + config_foreach vrrp_instance_name_count vrrp_instance "$name" [ $count -gt 0 ] && return 0 || return 1 } @@ -247,17 +249,17 @@ vrrp_sync_group() { local valid_group # No name for group, exit - config_get name $1 name + config_get name "$1" name [ -z "$name" ] && return 0 # No members for group, exit - config_get group $1 group + config_get group "$1" group [ -z "$group" ] && return 0 # Check if we have 'vrrp_instance's defined for # each member and remove names with not vrrp_instance defined for m in $group; do - vrrp_instance_check $m && valid_group="$valid_group $m" + vrrp_instance_check "$m" && valid_group="$valid_group $m" done [ -z "$valid_group" ] && return 0 @@ -266,7 +268,7 @@ vrrp_sync_group() { group="$valid_group" print_list_indent group - print_elems_indent $1 $INDENT_1 no_val_smtp_alert no_val_global_tracking + print_elems_indent "$1" "$INDENT_1" no_val_smtp_alert no_val_global_tracking print_notify "GROUP" "$name" notify_backup notify_master \ notify_fault notify @@ -277,21 +279,21 @@ vrrp_sync_group() { vrrp_instance() { local name auth_type auth_pass - config_get name $1 name + config_get name "$1" name [ -z "$name" ] && return 0 config_section_open "vrrp_instance" "$name" - config_get auth_type $1 auth_type - config_get auth_pass $1 auth_pass - [ -n "$auth_type" -a -n "$auth_pass" ] && { - printf "${INDENT_1}authentication {\n" >> $KEEPALIVED_CONF - printf "${INDENT_2}auth_type $auth_type\n" >> $KEEPALIVED_CONF - printf "${INDENT_2}auth_pass $auth_pass\n" >> $KEEPALIVED_CONF - printf "$INDENT_1}\n" >> $KEEPALIVED_CONF + config_get auth_type "$1" auth_type + config_get auth_pass "$1" auth_pass + [ -n "$auth_type" ] && [ -n "$auth_pass" ] && { + printf '%bauthentication {\n' "${INDENT_1}" >> "$KEEPALIVED_CONF" + printf '%bauth_type %s\n' "${INDENT_2}" "$auth_type" >> "$KEEPALIVED_CONF" + printf '%bauth_pass %s\n' "${INDENT_2}" "$auth_pass" >> "$KEEPALIVED_CONF" + printf '%b}\n' "${INDENT_1}" >> 
"$KEEPALIVED_CONF" } - print_elems_indent $1 $INDENT_1 state interface \ + print_elems_indent "$1" "$INDENT_1" state interface \ mcast_src_ip unicast_src_ip virtual_router_id version priority \ advert_int preempt_delay debug \ lvs_sync_daemon_interface garp_master_delay garp_master_refresh \ @@ -305,56 +307,56 @@ vrrp_instance() { # Handle virtual_ipaddress & virtual_ipaddress_excluded lists for opt in virtual_ipaddress virtual_ipaddress_excluded; do - config_get $opt $1 $opt + config_get "$opt" "$1" "$opt" eval optval=\$$opt [ -z "$optval" ] && continue - printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF + printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF" for a in $optval; do - config_foreach print_ipaddress_indent ipaddress $a $INDENT_2 + config_foreach print_ipaddress_indent ipaddress "$a" "$INDENT_2" done - printf "$INDENT_1}\n" >> $KEEPALIVED_CONF + printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF" done # Handle virtual_routes for opt in virtual_routes; do - config_get $opt $1 $opt + config_get "$opt" "$1" "$opt" eval optval=\$$opt [ -z "$optval" ] && continue - printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF + printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF" for r in $optval; do - config_foreach print_route_indent route $r $INDENT_2 + config_foreach print_route_indent route "$r" "$INDENT_2" done - printf "$INDENT_1}\n" >> $KEEPALIVED_CONF + printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF" done # Handle track_script lists for opt in track_script; do - config_get $opt $1 $opt + config_get "$opt" "$1" "$opt" eval optval=\$$opt [ -z "$optval" ] && continue - printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF + printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF" for t in $optval; do - printf "$INDENT_2$optval\n" >> $KEEPALIVED_CONF + printf '%b%s\n' "${INDENT_2}" "$optval" >> "$KEEPALIVED_CONF" done - printf "$INDENT_1}\n" >> $KEEPALIVED_CONF + printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF" done # Handle track_interface lists for opt in track_interface; do - config_get $opt $1 $opt + config_get "$opt" "$1" "$opt" eval optval=\$$opt [ -z "$optval" ] && continue - printf "$INDENT_1$opt {\n" >> $KEEPALIVED_CONF + printf '%b%s {\n' "${INDENT_1}" "$opt" >> "$KEEPALIVED_CONF" for t in $optval; do - config_foreach print_track_elem_indent track_interface $t $INDENT_2 + config_foreach print_track_elem_indent track_interface "$t" "$INDENT_2" done - printf "$INDENT_1}\n" >> $KEEPALIVED_CONF + printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF" done # Handle simple lists of strings (with no spaces in between) for opt in unicast_peer; do - config_get $opt $1 $opt - print_list_indent $opt + config_get "$opt" "$1" "$opt" + print_list_indent "$opt" done unset optval @@ -364,12 +366,12 @@ vrrp_instance() { vrrp_script() { local name - config_get name $1 name + config_get name "$1" name [ -z "$name" ] && return 0 config_section_open "vrrp_script" "$name" - print_elems_indent $1 $INDENT_1 script interval weight fall rise + print_elems_indent "$1" "$INDENT_1" script interval weight fall rise config_section_close } @@ -379,17 +381,17 @@ url() { local name path digest - config_get name $1 name + config_get name "$1" name [ "$url" = "$name" ] || return 0 - config_get path $1 path - config_get digest $1 digest + config_get path "$1" path + config_get digest "$1" digest - [ -n "$digest" -a -n "$path" ] && { - printf "${INDENT_3}url {\n" >> $KEEPALIVED_CONF - printf "${INDENT_4}path "$path"\n" >> $KEEPALIVED_CONF - printf "${INDENT_4}digest $digest\n" >> $KEEPALIVED_CONF - printf 
"${INDENT_3}}\n" >> $KEEPALIVED_CONF + [ -n "$digest" ] && [ -n "$path" ] && { + printf '%burl {\n' "${INDENT_3}" >> "$KEEPALIVED_CONF" + printf '%bpath %s\n' "${INDENT_4}" "$path" >> "$KEEPALIVED_CONF" + printf '%bdigest %s\n' "${INDENT_4}" "$digest" >> "$KEEPALIVED_CONF" + printf '%b}\n' "${INDENT_3}" >> "$KEEPALIVED_CONF" } } @@ -402,44 +404,44 @@ real_server() { local enabled name weight ipaddr port check - config_get_bool enabled $1 enabled 1 + config_get_bool enabled "$1" enabled 1 [ "$enabled" -eq 1 ] || return 0 - config_get name $1 name + config_get name "$1" name [ "$server" = "$name" ] || return 0 - config_get weight $1 weight + config_get weight "$1" weight [ -n "$weight" ] || return 0 - config_get ipaddr $1 ipaddr - config_get port $1 port - config_get check $1 check + config_get ipaddr "$1" ipaddr + config_get port "$1" port + config_get check "$1" check - [ -n "$ipaddr" -a -n "$port" ] && { - printf "${INDENT_1}real_server $ipaddr $port {\n" >> $KEEPALIVED_CONF - printf "${INDENT_2}weight $weight\n" >> $KEEPALIVED_CONF + [ -n "$ipaddr" ] && [ -n "$port" ] && { + printf '%breal_server %s %d {\n' "${INDENT_1}" "$ipaddr" "$port" >> "$KEEPALIVED_CONF" + printf '%bweight %d\n' "${INDENT_2}" "$weight" >> "$KEEPALIVED_CONF" case "$check" in TCP_CHECK) - printf "${INDENT_2}${check} {\n" >> $KEEPALIVED_CONF - print_elems_indent $1 $INDENT_3 connect_timeout \ + printf '%b%s {\n' "${INDENT_2}" "$check" >> "$KEEPALIVED_CONF" + print_elems_indent "$1" "$INDENT_3" connect_timeout \ connect_port - printf "${INDENT_2}}\n" >> $KEEPALIVED_CONF + printf '%b}\n' "${INDENT_2}" >> "$KEEPALIVED_CONF" ;; MISC_CHECK) - printf "${INDENT_2}${check} {\n" >> $KEEPALIVED_CONF - print_elems_indent $1 $INDENT_3 misc_path - printf "${INDENT_2}}\n" >> $KEEPALIVED_CONF + printf '%b%s {\n' "${INDENT_2}" "$check" >> "$KEEPALIVED_CONF" + print_elems_indent "$1" "$INDENT_3" misc_path + printf '%b}\n' "${INDENT_2}" >> "$KEEPALIVED_CONF" ;; HTTP_GET | SSL_GET) - printf "${INDENT_2}${check} {\n" >> $KEEPALIVED_CONF - print_elems_indent $1 $INDENT_3 connect_timeout \ + printf '%b%s {\n' "${INDENT_2}" "$check" >> "$KEEPALIVED_CONF" + print_elems_indent "$1" "$INDENT_3" connect_timeout \ connect_port nb_get_retry delay_before_retry # Handle url list - config_list_foreach $1 url url_list - printf "${INDENT_2}}\n" >> $KEEPALIVED_CONF + config_list_foreach "$1" url url_list + printf '%b}\n' "${INDENT_2}" >> "$KEEPALIVED_CONF" ;; esac - printf "${INDENT_1}}\n" >> $KEEPALIVED_CONF + printf '%b}\n' "${INDENT_1}" >> "$KEEPALIVED_CONF" } } @@ -450,33 +452,33 @@ real_server_list() { virtual_server() { local enabled ipaddr port lb_algo sorry_server_ip sorry_server_port - config_get_bool enabled $1 enabled 1 + config_get_bool enabled "$1" enabled 1 [ "$enabled" -eq 1 ] || return 0 - config_get ipaddr $1 ipaddr + config_get ipaddr "$1" ipaddr [ -z "$ipaddr" ] && return 0 - config_get port $1 port + config_get port "$1" port [ -z "$port" ] && return 0 config_section_open "virtual_server" "$ipaddr $port" - print_elems_indent $1 $INDENT_1 fwmark delay_loop \ + print_elems_indent "$1" "$INDENT_1" fwmark delay_loop \ lb_kind persistence_timeout persistence_granularity \ virtualhost protocol - config_get lb_algo $1 lb_algo + config_get lb_algo "$1" lb_algo [ -z "$lb_algo" ] && lb_algo="rr" - modprobe ip_vs_${lb_algo} 2>&1 1>/dev/null - printf "${INDENT_1}lb_algo ${lb_algo}\n" >> $KEEPALIVED_CONF + modprobe ip_vs_${lb_algo} 1>/dev/null 2>&1 + printf '%blb_algo %s\n' "${INDENT_1}" "${lb_algo}" >> "$KEEPALIVED_CONF" - config_get 
sorry_server_ip $1 sorry_server_ip - config_get sorry_server_port $1 sorry_server_port - [ -n "$sorry_server_ip" -a -n "$sorry_server_port" ] && { - printf "${INDENT_1}sorry_server $sorry_server_ip $sorry_server_port\n" >> $KEEPALIVED_CONF + config_get sorry_server_ip "$1" sorry_server_ip + config_get sorry_server_port "$1" sorry_server_port + [ -n "$sorry_server_ip" ] && [ -n "$sorry_server_port" ] && { + printf '%bsorry_server %s %s\n' "${INDENT_1}" "$sorry_server_ip" "$sorry_server_port" >> "$KEEPALIVED_CONF" } # Handle real_server list - config_list_foreach $1 real_server real_server_list + config_list_foreach "$1" real_server real_server_list config_section_close } @@ -484,11 +486,11 @@ virtual_server() { process_config() { local alt_config_file - rm -f $KEEPALIVED_CONF + rm -f "$KEEPALIVED_CONF" # First line - printf "! Configuration file for keepalived (autogenerated via init script)\n" > $KEEPALIVED_CONF - printf "! Written %s\n\n" "$(date +'%c')" >> $KEEPALIVED_CONF + printf '! Configuration file for keepalived (autogenerated via init script)\n' > "$KEEPALIVED_CONF" + printf '! Written %s\n\n' "$(date +'%c')" >> "$KEEPALIVED_CONF" [ -f /etc/config/keepalived ] || return 0 config_load 'keepalived' @@ -499,9 +501,9 @@ process_config() { # If "alt_config_file" specified, use that instead [ -n "$alt_config_file" ] && [ -f "$alt_config_file" ] && { - rm -f $KEEPALIVED_CONF + rm -f "$KEEPALIVED_CONF" # Symlink "alt_config_file" since it's a bit easier and safer - ln -s $alt_config_file $KEEPALIVED_CONF + ln -s "$alt_config_file" "$KEEPALIVED_CONF" return 0 } diff --git a/net/knot/Makefile b/net/knot/Makefile index ade9bc0f9..23f49f041 100644 --- a/net/knot/Makefile +++ b/net/knot/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=knot -PKG_VERSION:=2.8.2 +PKG_VERSION:=2.8.3 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=https://secure.nic.cz/files/knot-dns/ -PKG_HASH:=00d24361a2406392c508904fad943536bae6369981686b4951378fc1c9a5a137 +PKG_HASH:=8a62d81e5cf3df938f469b60ed4e46d9161007c2b89fbf7ae07525fa68368bad PKG_MAINTAINER:=Daniel Salzman <daniel.salzman@nic.cz> PKG_LICENSE:=GPL-3.0 LGPL-2.0 0BSD BSD-3-Clause OLDAP-2.8 @@ -44,7 +44,7 @@ endef define Package/knot-libs $(call Package/knot-lib/Default) TITLE+= common DNS and DNSSEC libraries - DEPENDS+=+libgnutls + DEPENDS+=+libgnutls +lmdb endef define Package/knot-libzscanner diff --git a/net/lksctp-tools/Makefile b/net/lksctp-tools/Makefile index 9e85b91a4..5a89aa370 100644 --- a/net/lksctp-tools/Makefile +++ b/net/lksctp-tools/Makefile @@ -29,7 +29,7 @@ define Package/lksctp-tools/Default SECTION:=net CATEGORY:=Network TITLE:=SCTP user-land - URL:=http://lksctp.sourceforge.net + URL:=https://github.com/sctp/lksctp-tools endef define Package/libsctp diff --git a/net/miniupnpc/Makefile b/net/miniupnpc/Makefile index 7e7fddab5..7ad1a284b 100644 --- a/net/miniupnpc/Makefile +++ b/net/miniupnpc/Makefile @@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=miniupnpc -PKG_VERSION:=2.1.20190408 -PKG_RELEASE:=2 +PKG_VERSION:=2.1.20190625 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://miniupnp.tuxfamily.org/files -PKG_HASH:=a0c46bcf6065d6351a8fa6a0a18dc57d10a16908dbb470908fd2e423511514ec +PKG_HASH:=8723f5d7fd7970de23635547700878cd29a5c2bb708b5e5475b2d1d2510317fb PKG_MAINTAINER:=Steven Barth <cyrus@openwrt.org> PKG_LICENSE:=BSD-3c diff --git a/net/miniupnpc/patches/100-no-fPIC.patch b/net/miniupnpc/patches/100-no-fPIC.patch index 1e8b96487..ce4c6752c 100644 --- 
a/net/miniupnpc/patches/100-no-fPIC.patch +++ b/net/miniupnpc/patches/100-no-fPIC.patch @@ -1,6 +1,6 @@ --- a/CMakeLists.txt +++ b/CMakeLists.txt -@@ -38,12 +38,6 @@ if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") +@@ -39,12 +39,6 @@ if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") target_compile_definitions(miniupnpc-private INTERFACE _DARWIN_C_SOURCE) endif () diff --git a/net/miniupnpc/patches/300-add-listdevices-to-cmake.patch b/net/miniupnpc/patches/300-add-listdevices-to-cmake.patch deleted file mode 100644 index e8d790058..000000000 --- a/net/miniupnpc/patches/300-add-listdevices-to-cmake.patch +++ /dev/null @@ -1,21 +0,0 @@ ---- a/CMakeLists.txt -+++ b/CMakeLists.txt -@@ -66,6 +66,7 @@ set (MINIUPNPC_SOURCES - connecthostport.c - portlistingparse.c - receivedata.c -+ listdevices.c - connecthostport.h - igd_desc_parse.h - minisoap.h -@@ -142,6 +143,10 @@ if (UPNPC_BUILD_SHARED) - add_executable (upnpc-shared upnpc.c) - target_link_libraries (upnpc-shared PRIVATE libminiupnpc-shared) - target_include_directories(upnpc-shared PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) -+ -+ add_executable (listdevices listdevices.c) -+ target_link_libraries (listdevices PRIVATE libminiupnpc-shared) -+ target_include_directories(listdevices PRIVATE ${CMAKE_CURRENT_BINARY_DIR}) - endif () - endif () - diff --git a/net/mosquitto/Makefile b/net/mosquitto/Makefile index 37ee7198e..290f3c4e3 100644 --- a/net/mosquitto/Makefile +++ b/net/mosquitto/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=mosquitto -PKG_VERSION:=1.6.3 +PKG_VERSION:=1.6.4 PKG_RELEASE:=1 PKG_LICENSE:=BSD-3-Clause PKG_LICENSE_FILES:=LICENSE.txt @@ -17,7 +17,7 @@ PKG_CPE_ID:=cpe:/a:eclipse:mosquitto PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://mosquitto.org/files/source/ -PKG_HASH:=9ef5cc75f4fe31d7bf50654ddf4728ad9e1ae2e5609a4b42ecbbcb4a209ed17e +PKG_HASH:=a3d5822c249f6a6e13311b1b09eff6807ea01608a5a77934e1769842e9d146ef PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) include $(INCLUDE_DIR)/package.mk diff --git a/net/netifyd/Makefile b/net/netifyd/Makefile index 9c3faab9b..63417a162 100644 --- a/net/netifyd/Makefile +++ b/net/netifyd/Makefile @@ -16,10 +16,10 @@ PKG_INSTALL:=1 PKG_SOURCE_PROTO:=git PKG_SOURCE_URL:=https://gitlab.com/netify.ai/public/netify-agent.git -PKG_SOURCE_DATE:=2019-06-06 -PKG_SOURCE_VERSION:=v2.88 -#PKG_SOURCE_VERSION:=367ddd2fca4b2edd5e71145e2adea1b58f750214 -PKG_MIRROR_HASH:=8ead41dc074a71626609bced1d584f8df87e39f5ad76dcca76021c1737150089 +PKG_SOURCE_DATE:=2019-08-09 +PKG_SOURCE_VERSION:=v2.91 +#PKG_SOURCE_VERSION:=edb904b417a42a1421474427f03e91e0400d8729 +PKG_MIRROR_HASH:=9a7c6a84fc35677f65ac7ff84f228b0051204fae388869042d7623c141ec4165 include $(INCLUDE_DIR)/package.mk @@ -28,7 +28,7 @@ define Package/netifyd CATEGORY:=Network TITLE:=Netify Agent URL:=http://www.netify.ai/ - DEPENDS:=+libcurl +libmnl +libnetfilter-conntrack +libjson-c +libpcap +zlib +libpthread @!USE_UCLIBC + DEPENDS:=+ca-bundle +libcurl +libmnl +libnetfilter-conntrack +libjson-c +libpcap +zlib +libpthread @!USE_UCLIBC # Explicitly depend on libstdcpp rather than $(CXX_DEPENDS). At the moment # std::unordered_map is only available via libstdcpp which is required for # performance reasons. 
@@ -54,7 +54,6 @@ TARGET_LDFLAGS+=-Wl,--gc-sections CONFIGURE_ARGS+= \ --sharedstatedir=/var/run \ --enable-lean-and-mean \ - --disable-ncurses \ --disable-libtcmalloc \ --without-systemdsystemunitdir \ --without-tmpfilesdir diff --git a/net/nginx/Makefile b/net/nginx/Makefile index 85f97bc9d..50635960d 100644 --- a/net/nginx/Makefile +++ b/net/nginx/Makefile @@ -429,18 +429,18 @@ endif ifeq ($(CONFIG_NGINX_HTTP_BROTLI),y) define Download/nginx-brotli - VERSION:=e26248ee361c04e25f581b92b85d95681bdffb39 + VERSION:=dc37f658ccb5a51d090dc09d1a2aca2f24309869 SUBDIR:=nginx-brotli - FILE:=ngx-brotli-module-$$(VERSION).tar.gz + FILE:=ngx-brotli-module-$$(VERSION).tar.xz URL:=https://github.com/eustas/ngx_brotli.git - MIRROR_HASH:=76b891ba49f82f0cfbc9cba875646e26ee986b522373e0aa2698a9923a4adcdb + MIRROR_HASH:=6bc0c40ff24f6e0ac616dfddc803bdc7fcf54764ba9dc4f9cecb3a68beedcdaf PROTO:=git endef $(eval $(call Download,nginx-brotli)) define Prepare/nginx-brotli $(eval $(Download/nginx-brotli)) - gzip -dc $(DL_DIR)/$(FILE) | tar -C $(PKG_BUILD_DIR) $(TAR_OPTIONS) + xzcat $(DL_DIR)/$(FILE) | tar -C $(PKG_BUILD_DIR) $(TAR_OPTIONS) endef endif diff --git a/net/nginx/files-luci-support/luci_nginx.conf b/net/nginx/files-luci-support/luci_nginx.conf index 31af664a2..5b0f3da0a 100644 --- a/net/nginx/files-luci-support/luci_nginx.conf +++ b/net/nginx/files-luci-support/luci_nginx.conf @@ -20,7 +20,7 @@ http { sendfile on; keepalive_timeout 0; - + client_body_buffer_size 10K; client_header_buffer_size 1k; client_max_body_size 1G; @@ -31,14 +31,15 @@ http { gzip_vary on; gzip_comp_level 1; gzip_proxied any; - + gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript image/svg+xml; + root /www; server { listen 80 default_server; listen [::]:80 default_server; server_name localhost; - + location ~* .(jpg|jpeg|png|gif|ico|css|js)$ { expires 365d; } diff --git a/net/nginx/files-luci-support/luci_nginx_ssl.conf b/net/nginx/files-luci-support/luci_nginx_ssl.conf index 318453b54..db33e554c 100644 --- a/net/nginx/files-luci-support/luci_nginx_ssl.conf +++ b/net/nginx/files-luci-support/luci_nginx_ssl.conf @@ -20,7 +20,7 @@ http { sendfile on; keepalive_timeout 0; - + client_body_buffer_size 10K; client_header_buffer_size 1k; client_max_body_size 1G; @@ -31,9 +31,10 @@ http { gzip_vary on; gzip_comp_level 1; gzip_proxied any; - + gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript application/javascript image/svg+xml; + root /www; - + server { listen 80 default_server; listen [::]:80 default_server; @@ -45,7 +46,7 @@ http { listen 443 ssl default_server; listen [::]:443 ssl default_server; server_name localhost; - + ssl_protocols TLSv1 TLSv1.1 TLSv1.2; ssl_prefer_server_ciphers on; ssl_ciphers "EECDH+ECDSA+AESGCM:EECDH+aRSA+AESGCM:EECDH+ECDSA+SHA384:EECDH+ECDSA+SHA256:EECDH+aRSA+SHA384:EECDH+aRSA+SHA256:EECDH:DHE+AESGCM:DHE:!RSA!aNULL:!eNULL:!LOW:!RC4:!3DES:!MD5:!EXP:!PSK:!SRP:!DSS:!CAMELLIA:!SEED"; @@ -53,7 +54,7 @@ http { ssl_certificate /etc/nginx/nginx.cer; ssl_certificate_key /etc/nginx/nginx.key; - + location ~* .(jpg|jpeg|png|gif|ico|css|js)$ { expires 365d; } diff --git a/net/nsd/Makefile b/net/nsd/Makefile index 334ba8726..b6fb9f644 100644 --- a/net/nsd/Makefile +++ b/net/nsd/Makefile @@ -8,27 +8,26 @@ include $(TOPDIR)/rules.mk PKG_NAME:=nsd -PKG_VERSION:=4.1.13 +PKG_VERSION:=4.2.1 PKG_RELEASE:=1 
PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_SOURCE_URL:=http://www.nlnetlabs.nl/downloads/nsd -PKG_HASH:=c45cd4ba2101a027e133b2be44db9378e27602e05f09a5ef25019e1ae45291af -PKG_FIXUP:=autoreconf -PKG_INSTALL:=1 - -PKG_LICENSE:=BSD-3c -PKG_LICENSE_FILES:=LICENSE +PKG_SOURCE_URL:=https://www.nlnetlabs.nl/downloads/nsd +PKG_HASH:=d17c0ea3968cb0eb2be79f2f83eb299b7bfcc554b784007616eed6ece828871f +PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) PKG_MAINTAINER:=Vasilis Tsiligiannis <acinonyx@openwrt.gr> +PKG_LICENSE:=BSD-3-Clause +PKG_LICENSE_FILES:=LICENSE -PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) +PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 include $(INCLUDE_DIR)/package.mk define Package/nsd/default TITLE:=NSD Name Server Daemon - URL:=http://www.nlnetlabs.nl/projects/nsd/ + URL:=https://www.nlnetlabs.nl/projects/nsd/ SECTION:=net CATEGORY:=Network SUBMENU:=IP Addresses and Names @@ -85,22 +84,22 @@ define Package/nsd-control-setup/description endef CONFIGURE_ARGS+= \ + --enable-packed \ + --enable-recvmmsg \ + --enable-tcp-fastopen \ --disable-checking \ - --with-libevent=no \ --with-user="network" \ + --without-libevent \ + $(if $(CONFIG_IPV6),--with,--without)-ipv6 CONFIGURE_VARS+= \ ac_cv_c_va_list_def=no \ - ac_cv_c_strptime_needs_defs=no \ + ac_cv_c_strptime_needs_defs=no ifeq ($(BUILD_VARIANT),ssl) - CONFIGURE_ARGS += \ - --with-ssl="$(STAGING_DIR)/usr" -endif - -ifeq ($(BUILD_VARIANT),nossl) - CONFIGURE_ARGS += \ - --without-ssl + CONFIGURE_ARGS += --with-ssl="$(STAGING_DIR)/usr" +else + CONFIGURE_ARGS += --without-ssl endif define Package/nsd/conffiles diff --git a/net/nsd/patches/010-musl.patch b/net/nsd/patches/010-musl.patch new file mode 100644 index 000000000..3ba61e408 --- /dev/null +++ b/net/nsd/patches/010-musl.patch @@ -0,0 +1,10 @@ +--- a/nsd-control.c ++++ b/nsd-control.c +@@ -42,6 +42,7 @@ + */ + + #include "config.h" ++#include <stdio.h> + #ifdef HAVE_SSL + + #include <sys/types.h> diff --git a/net/nsd/patches/020-openssl.patch b/net/nsd/patches/020-openssl.patch new file mode 100644 index 000000000..31381ff7f --- /dev/null +++ b/net/nsd/patches/020-openssl.patch @@ -0,0 +1,13 @@ +--- a/tsig.c ++++ b/tsig.c +@@ -19,6 +19,10 @@ + #include "query.h" + #include "rbtree.h" + ++#ifndef HAVE_SSL ++#define CRYPTO_memcmp memcmp ++#endif ++ + static region_type *tsig_region; + + struct tsig_key_table diff --git a/net/p910nd/Makefile b/net/p910nd/Makefile index 12b4e8637..cfe4ea7d9 100644 --- a/net/p910nd/Makefile +++ b/net/p910nd/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=p910nd PKG_VERSION:=0.97 -PKG_RELEASE:=7 +PKG_RELEASE:=8 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 PKG_SOURCE_URL:=@SF/p910nd @@ -28,6 +28,7 @@ define Package/p910nd SUBMENU:=Printing TITLE:=A small non-spooling printer server URL:=http://p910nd.sourceforge.net + USERID:=p910nd=393:lp=7 endef define Package/p910nd/conffiles @@ -54,6 +55,8 @@ define Package/p910nd/install $(INSTALL_DATA) ./files/p910nd.config $(1)/etc/config/p910nd $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/p910nd.init $(1)/etc/init.d/p910nd + $(INSTALL_DIR) $(1)/etc/hotplug.d/usbmisc + $(INSTALL_BIN) ./files/p910nd.hotplug $(1)/etc/hotplug.d/usbmisc/20-p910nd endef $(eval $(call BuildPackage,p910nd)) diff --git a/net/p910nd/files/p910nd.config b/net/p910nd/files/p910nd.config index b8257b77c..5446192c7 100644 --- a/net/p910nd/files/p910nd.config +++ b/net/p910nd/files/p910nd.config @@ -5,6 +5,8 @@ config p910nd option port 0 option 
bidirectional 1 option enabled 0 + # Override running as user p910nd, group lp + option runas_root 0 # mDNS support - see Bonjour Printing Specification for details concerning the values # Be aware that you can only advertise one printer on this host via mDNS diff --git a/net/p910nd/files/p910nd.hotplug b/net/p910nd/files/p910nd.hotplug new file mode 100644 index 000000000..0c2291efa --- /dev/null +++ b/net/p910nd/files/p910nd.hotplug @@ -0,0 +1,13 @@ +#!/bin/sh + +case "$ACTION" in + add) + [ -n "${DEVNAME}" ] && [ "${DEVNAME##usb/lp*}" = "" ] && { + chmod 660 /dev/"$DEVNAME" + chgrp lp /dev/"$DEVNAME" + } + ;; + remove) + # device is gone + ;; +esac diff --git a/net/p910nd/files/p910nd.init b/net/p910nd/files/p910nd.init index 479b8410f..0eadebd65 100644 --- a/net/p910nd/files/p910nd.init +++ b/net/p910nd/files/p910nd.init @@ -28,7 +28,7 @@ start_service() { start_p910nd() { - local section="$1" + local section="$1" runas_root config_get_bool "enabled" "$section" "enabled" '1' if [ "$enabled" -gt 0 ]; then args="-d " @@ -42,6 +42,9 @@ start_p910nd() { procd_set_param command /usr/sbin/p910nd $args procd_set_param respawn + config_get_bool runas_root "$section" runas_root 0 + [ "$runas_root" -ne 1 ] && procd_set_param user p910nd + config_get_bool "mdns" "$section" "mdns" '0' config_get mdns_note "$section" mdns_note config_get mdns_ty "$section" mdns_ty diff --git a/net/pdns/Makefile b/net/pdns/Makefile index 09b204b12..2f8685db1 100644 --- a/net/pdns/Makefile +++ b/net/pdns/Makefile @@ -1,17 +1,17 @@ include $(TOPDIR)/rules.mk PKG_NAME:=pdns -PKG_VERSION:=4.1.10 -PKG_RELEASE:=2 +PKG_VERSION:=4.1.13 +PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 PKG_SOURCE_URL:=https://downloads.powerdns.com/releases/ -PKG_HASH:=5a46cfde92caaaa2e85af9a15acb9ad81b56f4c8a8255c457e6938d8c0cb15c7 +PKG_HASH:=e7ea9c628a03652d2ca9e048525d44ac5628a9fede45e510ff9ba756ae2f5f25 PKG_MAINTAINER:=James Taylor <james@jtaylor.id.au> PKG_LICENCE:=GPL-2.0-only PKG_LICENCE_FILES:=COPYING -PKG_CPE_ID:=cpe:/a:powerdns:authoritative_server +PKG_CPE_ID:=cpe:/a:powerdns:authoritative PKG_FIXUP:=autoreconf diff --git a/net/pdns/files/pdns.conf-dist b/net/pdns/files/pdns.conf-dist index e208c1b34..96e09a550 100644 --- a/net/pdns/files/pdns.conf-dist +++ b/net/pdns/files/pdns.conf-dist @@ -92,7 +92,7 @@ ################################# # config-dir Location of configuration directory (pdns.conf) # -# config-dir=/usr/local/etc +# config-dir=/etc/powerdns ################################# # config-name Name of this virtual configuration - will rename the binary image @@ -377,7 +377,7 @@ ################################# # module-dir Default directory for modules # -# module-dir=/usr/local/lib/pdns +# module-dir=/usr/lib/powerdns/pdns ################################# # negquery-cache-ttl Seconds to store negative query results in the QueryCache @@ -530,6 +530,11 @@ # socket-dir= ################################# +# superslave Act as a superslave +# +# superslave=yes + +################################# # tcp-control-address If set, PowerDNS can be controlled over TCP on this address # # tcp-control-address= @@ -618,3 +623,5 @@ # xfr-max-received-mbytes Maximum number of megabytes received from an incoming XFR # # xfr-max-received-mbytes=100 + + diff --git a/net/phantap/Makefile b/net/phantap/Makefile new file mode 100644 index 000000000..083757042 --- /dev/null +++ b/net/phantap/Makefile @@ -0,0 +1,86 @@ +# Copyright (C) 2019 Diana Dragusin <diana.dragusin@nccgroup.com> +# Copyright (C) 2019 Etienne Champetier 
<champetier.etienne@gmail.com> +# +# This is free software, licensed under the GNU General Public License v3. +# See <http://www.gnu.org/licenses/> for more information. + +include $(TOPDIR)/rules.mk + +PKG_NAME:=phantap +PKG_RELEASE:=1 + +PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/nccgroup/phantap +PKG_MIRROR_HASH:=d81f7b52f2259af093240446674b41ce354222aa7d851504fbc445f3a7c1431e +PKG_SOURCE_DATE:=2019.08.13 +PKG_SOURCE_VERSION:=f5420af847dd53b2d4cf2b5c2551239709d51bf7 + +PKG_MAINTAINER:=Diana Dragusin <diana.dragusin@nccgroup.com>, \ + Etienne Champetier <champetier.etienne@gmail.com> +PKG_LICENSE:=GPL-3.0-only + +include $(INCLUDE_DIR)/package.mk +include $(INCLUDE_DIR)/cmake.mk + +CMAKE_SOURCE_SUBDIR:=src + +define Package/phantap/Default + SECTION:=net + CATEGORY:=Network + URL:=https://github.com/nccgroup/phantap +endef + +define Package/phantap + $(call Package/phantap/Default) + TITLE:=PhanTap + PKGARCH:=all + DEPENDS:=+ebtables +tcpdump +ip-full +kmod-br-netfilter +kmod-ebtables-ipv4 +endef + +define Package/phantap/conffiles +/etc/config/phantap +endef + +define Package/phantap/description + PhanTap or Phantom tap is a small set of scripts that allow you to setup a network tap + that automatically impersonate a victim device, allowing you to access internet using + the IP & MAC of the victim. To speak to machines in the same L2, see PhanTap learn +endef + +define Package/phantap-learn + $(call Package/phantap/Default) + TITLE:=PhanTap-learn + DEPENDS:=+libpcap +ip-full +endef + +define Package/phantap-learn/description + PhanTap learn listens to multicast / broadcast / arp traffic to fill the arp table + and add routes to the discovered IPs. +endef + +define Package/phantap/install + $(INSTALL_DIR) $(1)/etc/config + $(INSTALL_DATA) $(PKG_BUILD_DIR)/files/etc/config/phantap $(1)/etc/config/ + $(INSTALL_DIR) $(1)/etc/hotplug.d/iface + $(INSTALL_DATA) $(PKG_BUILD_DIR)/files/etc/hotplug.d/iface/00-phantap $(1)/etc/hotplug.d/iface/ + $(INSTALL_DIR) $(1)/etc/hotplug.d/net + $(INSTALL_DATA) $(PKG_BUILD_DIR)/files/etc/hotplug.d/net/00-phantap $(1)/etc/hotplug.d/net/ + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) $(PKG_BUILD_DIR)/files/etc/init.d/phantap $(1)/etc/init.d/ + $(INSTALL_DIR) $(1)/etc/sysctl.d + $(INSTALL_DATA) $(PKG_BUILD_DIR)/files/etc/sysctl.d/12-phantap.conf $(1)/etc/sysctl.d/ + $(INSTALL_DIR) $(1)/usr/bin + $(INSTALL_BIN) $(PKG_BUILD_DIR)/files/usr/bin/phantap $(1)/usr/bin/ +endef + +define Package/phantap-learn/install + $(INSTALL_DIR) $(1)/etc/hotplug.d/iface + $(INSTALL_DATA) $(PKG_BUILD_DIR)/files/etc/hotplug.d/iface/00-phantap-learn $(1)/etc/hotplug.d/iface/ + $(INSTALL_DIR) $(1)/etc/init.d + $(INSTALL_BIN) $(PKG_BUILD_DIR)/files/etc/init.d/phantap-learn $(1)/etc/init.d/ + $(INSTALL_DIR) $(1)/usr/sbin + $(INSTALL_BIN) $(PKG_BUILD_DIR)/phantap-learn $(1)/usr/sbin/ +endef + +$(eval $(call BuildPackage,phantap)) +$(eval $(call BuildPackage,phantap-learn)) diff --git a/net/samba4/Makefile b/net/samba4/Makefile index 3fb6c29ca..5e416fab8 100644 --- a/net/samba4/Makefile +++ b/net/samba4/Makefile @@ -3,23 +3,23 @@ include $(TOPDIR)/rules.mk PKG_NAME:=samba PKG_VERSION:=4.9.11 -PKG_RELEASE:=1 - -PKG_MAINTAINER:=Andy Walsh <andy.walsh44+github@gmail.com> -PKG_LICENSE:=GPL-3.0-only -PKG_LICENSE_FILES:=COPYING +PKG_RELEASE:=3 +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://ftp.heanet.ie/mirrors/ftp.samba.org/stable/ \ https://ftp.gwdg.de/pub/samba/stable/ \ https://ftp.riken.jp/net/samba/samba/stable/ \ 
http://www.nic.funet.fi/index/samba/pub/samba/stable/ \ http://samba.mirror.bit.nl/samba/ftp/stable/ \ https://download.samba.org/pub/samba/stable/ -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_HASH:=bb736624d16f7369e395de2f15fec153b554f76f95864015b4ce1f2ae53e817b +PKG_MAINTAINER:=Andy Walsh <andy.walsh44+github@gmail.com> +PKG_LICENSE:=GPL-3.0-only +PKG_LICENSE_FILES:=COPYING + # samba4=(asn1_compile) e2fsprogs=(compile_et) nfs-kernel-server=(rpcgen) -HOST_BUILD_DEPENDS:=nfs-kernel-server/host e2fsprogs/host +HOST_BUILD_DEPENDS:=python/host nfs-kernel-server/host e2fsprogs/host PKG_BUILD_DEPENDS:=samba4/host PKG_CONFIG_DEPENDS:= \ @@ -43,7 +43,7 @@ define Package/samba4/Default SECTION:=net CATEGORY:=Network TITLE:=Samba $(PKG_VERSION) - URL:=http://www.samba.org/ + URL:=https://www.samba.org/ endef define Package/samba4/Default/description diff --git a/net/samba4/files/samba.init b/net/samba4/files/samba.init index 48a2d0dca..d7eed562a 100644 --- a/net/samba4/files/samba.init +++ b/net/samba4/files/samba.init @@ -7,7 +7,8 @@ smb_header() { config_get samba_iface $1 interface "loopback lan" # resolve interfaces - local interfaces=$( + local interfaces + interfaces=$( . /lib/functions/network.sh local net @@ -15,19 +16,20 @@ smb_header() { local device network_is_up $net || continue network_get_device device "$net" - echo -n "${device:-$net} " + printf "%s " "${device:-$net}" done ) local workgroup description charset # we dont use netbios anymore as default and wsd/avahi is dns based - local hostname="$(cat /proc/sys/kernel/hostname)" + local hostname + hostname="$(cat /proc/sys/kernel/hostname)" config_get workgroup $1 workgroup "WORKGROUP" config_get description $1 description "Samba on OpenWrt" - config_get charset $1 charset "UTF-8" - - config_get_bool MACOS $1 macos 0 + config_get charset $1 charset "UTF-8" + + config_get_bool MACOS $1 macos 0 config_get_bool DISABLE_NETBIOS $1 disable_netbios 0 config_get_bool DISABLE_AD_DC $1 disable_ad_dc 0 config_get_bool DISABLE_WINBIND $1 disable_winbind 0 @@ -40,15 +42,16 @@ smb_header() { -e "s#|CHARSET|#$charset#g" \ /etc/samba/smb.conf.template > /var/etc/smb.conf - echo -e "\n######### Dynamic written config options #########\n" >> /var/etc/smb.conf - if [ "$DISABLE_NETBIOS" -eq 1 ] || [ ! -x /usr/sbin/nmbd ]; then - echo -e "\tdisable netbios = yes" >> /var/etc/smb.conf - fi + { + printf "\n######### Dynamic written config options #########\n" + if [ "$DISABLE_NETBIOS" -eq 1 ] || [ ! -x /usr/sbin/nmbd ]; then + printf "\tdisable netbios = yes\n" + fi - local homes - config_get_bool homes $1 homes 0 - [ $homes -gt 0 ] && { - cat <<EOT >> /var/etc/smb.conf + local homes + config_get_bool homes $1 homes 0 + [ $homes -gt 0 ] && { + cat <<EOT [homes] comment = Home Directories @@ -57,14 +60,15 @@ smb_header() { read only = no create mask = 0750 EOT - } + } + } >> /var/etc/smb.conf [ -e /etc/samba/smb.conf ] || ln -nsf /var/etc/smb.conf /etc/samba/smb.conf - + if ! [ -L /etc/samba/smb.conf ]; then logger -t 'samba4-server' "Local custom /etc/samba/smb.conf file detected, all luci/config settings are ignored!" 
fi - + } smb_add_share() { @@ -104,43 +108,45 @@ smb_add_share() { config_get write_list $1 write_list config_get read_list $1 read_list - [ -z "$name" -o -z "$path" ] && return + [ -z "$name" ] || [ -z "$path" ] && return - echo -e "\n[$name]\n\tpath = $path" >> /var/etc/smb.conf - - if [ "$force_root" -eq 1 ]; then - echo -e "\tforce user = root" >> /var/etc/smb.conf - echo -e "\tforce group = root" >> /var/etc/smb.conf - else - [ -n "$users" ] && echo -e "\tvalid users = $users" >> /var/etc/smb.conf - fi + { + printf "\n[$name]\n\tpath = %s\n" "$path" - [ -n "$create_mask" ] && echo -e "\tcreate mask = $create_mask" >> /var/etc/smb.conf - [ -n "$dir_mask" ] && echo -e "\tdirectory mask = $dir_mask" >> /var/etc/smb.conf - - [ -n "$browseable" ] && echo -e "\tbrowseable = $browseable" >> /var/etc/smb.conf - [ -n "$read_only" ] && echo -e "\tread only = $read_only" >> /var/etc/smb.conf - [ -n "$writeable" ] && echo -e "\twriteable = $writeable" >> /var/etc/smb.conf - [ -n "$guest_ok" ] && echo -e "\tguest ok = $guest_ok" >> /var/etc/smb.conf - [ -n "$guest_only" ] && echo -e "\tguest only = $guest_only" >> /var/etc/smb.conf - [ -n "$inherit_owner" ] && echo -e "\tinherit owner = $inherit_owner" >> /var/etc/smb.conf - - [ -n "$write_list" ] && echo -e "\twrite list = $write_list" >> /var/etc/smb.conf - [ -n "$read_list" ] && echo -e "\tread list = $read_list" >> /var/etc/smb.conf - - if [ "$MACOS" -eq 1 ]; then - vfs_objects="catia fruit streams_xattr $vfs_objects" - echo -e "\tfruit:encoding = native" >> /var/etc/smb.conf - echo -e "\tfruit:metadata = stream" >> /var/etc/smb.conf - echo -e "\tfruit:veto_appledouble = no" >> /var/etc/smb.conf - # avoid mixed shares order for aapl - if [ "$timemachine" -eq 1 ]; then - echo -e "\tfruit:time machine = yes" >> /var/etc/smb.conf - [ -n "$timemachine_maxsize" ] && echo -e "\tfruit:time machine max size = ${timemachine_maxsize}G" >> /var/etc/smb.conf + if [ "$force_root" -eq 1 ]; then + printf "\tforce user = root\n" + printf "\tforce group = root\n" + else + [ -n "$users" ] && printf "\tvalid users = %s\n" "$users" fi - fi - - [ -n "$vfs_objects" ] && echo -e "\tvfs objects = $vfs_objects" >> /var/etc/smb.conf + + [ -n "$create_mask" ] && printf "\tcreate mask = %s\n" "$create_mask" + [ -n "$dir_mask" ] && printf "\tdirectory mask = %s\n" "$dir_mask" + + [ -n "$browseable" ] && printf "\tbrowseable = %s\n" "$browseable" + [ -n "$read_only" ] && printf "\tread only = %s\n" "$read_only" + [ -n "$writeable" ] && printf "\twriteable = %s\n" "$writeable" + [ -n "$guest_ok" ] && printf "\tguest ok = %s\n" "$guest_ok" + [ -n "$guest_only" ] && printf "\tguest only = %s\n" "$guest_only" + [ -n "$inherit_owner" ] && printf "\tinherit owner = %s\n" "$inherit_owner" + + [ -n "$write_list" ] && printf "\twrite list = %s\n" "$write_list" + [ -n "$read_list" ] && printf "\tread list = %s\n" "$read_list" + + if [ "$MACOS" -eq 1 ]; then + vfs_objects="catia fruit streams_xattr $vfs_objects" + printf "\tfruit:encoding = native\n" + printf "\tfruit:metadata = stream\n" + printf "\tfruit:veto_appledouble = no\n" + # avoid mixed shares order for aapl + if [ "$timemachine" -eq 1 ]; then + printf "\tfruit:time machine = yes\n" + [ -n "$timemachine_maxsize" ] && printf "\tfruit:time machine max size = %sG\n" "${timemachine_maxsize}" + fi + fi + + [ -n "$vfs_objects" ] && printf "\tvfs objects = %s\n" "$vfs_objects" + } >> /var/etc/smb.conf } init_config() { @@ -149,10 +155,8 @@ init_config() { [ -d /var/cache/samba ] || mkdir -p /var/cache/samba [ -d /var/run/samba ] 
|| mkdir -p /var/run/samba [ -d /var/log/samba ] || mkdir -p /var/log/samba - [ -d /var/lock ] && chmod 0755 /var/lock || { - mkdir -p /var/lock - chmod 0755 /var/lock - } + [ -d /var/lock ] || mkdir -p /var/lock + chmod 0755 /var/lock config_load samba4 config_foreach smb_header samba @@ -161,9 +165,9 @@ init_config() { service_triggers() { PROCD_RELOAD_DELAY=2000 - + procd_add_reload_trigger "dhcp" "system" "samba4" - + local i for i in $samba_iface; do procd_add_reload_interface_trigger $i @@ -207,9 +211,9 @@ start_service() { fi # lower priority using renice (if found) if [ -x /usr/bin/renice ]; then - [ -x /usr/sbin/samba ] && renice -n 2 $(pidof samba) - [ -x /usr/sbin/smbd ] && renice -n 2 $(pidof smbd) - [ -x /usr/sbin/nmbd ] && renice -n 2 $(pidof nmbd) - [ -x /usr/sbin/winbindd ] && renice -n 2 $(pidof winbindd) + [ -x /usr/sbin/samba ] && renice -n 2 "$(pidof samba)" + [ -x /usr/sbin/smbd ] && renice -n 2 "$(pidof smbd)" + [ -x /usr/sbin/nmbd ] && renice -n 2 "$(pidof nmbd)" + [ -x /usr/sbin/winbindd ] && renice -n 2 "$(pidof winbindd)" fi } diff --git a/net/shadowsocks-libev/Makefile b/net/shadowsocks-libev/Makefile index 2948ca8ce..3532d3c7f 100644 --- a/net/shadowsocks-libev/Makefile +++ b/net/shadowsocks-libev/Makefile @@ -13,12 +13,12 @@ include $(TOPDIR)/rules.mk # - check if default mode has changed from being tcp_only # PKG_NAME:=shadowsocks-libev -PKG_VERSION:=3.3.0 +PKG_VERSION:=3.3.1 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://github.com/shadowsocks/shadowsocks-libev/releases/download/v$(PKG_VERSION) -PKG_HASH:=9732f8b8f02ffeea261bcf15fbf104f826012f74dbee99d016b75f0894a39649 +PKG_HASH:=afd25ae5e737be385fa53364c66095c354277e98cf141b54beb2be93d9228f4f PKG_MAINTAINER:=Yousong Zhou <yszhou4tech@gmail.com> diff --git a/net/stubby/files/README.md b/net/stubby/files/README.md index bc5344cd8..12fcef397 100644 --- a/net/stubby/files/README.md +++ b/net/stubby/files/README.md @@ -86,7 +86,7 @@ to use resolvers found in `/etc/resolv.conf` by setting the dnsmasq option command line: uci add_list dhcp.@dnsmasq[-1].server='127.0.0.1#5453' - uci dhcp.@dnsmasq[-1].noresolv=1 + uci set dhcp.@dnsmasq[-1].noresolv=1 uci commit && reload_config The same outcome can be achieved in the LUCI web interface as follows: diff --git a/net/stunnel/Makefile b/net/stunnel/Makefile index 77771dda6..b1984e042 100644 --- a/net/stunnel/Makefile +++ b/net/stunnel/Makefile @@ -8,7 +8,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=stunnel -PKG_VERSION:=5.54 +PKG_VERSION:=5.55 PKG_RELEASE:=1 PKG_LICENSE:=GPL-2.0+ @@ -23,7 +23,7 @@ PKG_SOURCE_URL:= \ https://www.usenix.org.uk/mirrors/stunnel/archive/$(word 1, $(subst .,$(space),$(PKG_VERSION))).x/ PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_HASH:=5e8588a6c274b46b1d63e1b50f0725f4908dec736f6588eb48d1eb3d20c87902 +PKG_HASH:=90de69f41c58342549e74c82503555a6426961b29af3ed92f878192727074c62 PKG_FIXUP:=autoreconf PKG_FIXUP:=patch-libtool diff --git a/net/stunnel/files/stunnel.init b/net/stunnel/files/stunnel.init index bc46bd011..c04282657 100644 --- a/net/stunnel/files/stunnel.init +++ b/net/stunnel/files/stunnel.init @@ -124,7 +124,7 @@ validate_service_section_service_options() { print_options() { local _opt local _value - for _opt in $*; do + for _opt in "$@"; do eval "_value=\$$_opt" [ -z "$_value" ] || echo "$_opt = $_value" >> "$CONF_FILE" done @@ -134,7 +134,7 @@ print_bool_options() { local _opt local _bool local _value - for _opt in $*; do + for _opt in "$@"; do eval "_bool=\$$_opt" [ -z "$_bool" ] || { 
_value=no @@ -148,7 +148,7 @@ print_lists_map() { local _opt local _values local _value - for _opt in $*; do + for _opt in "$@"; do eval "_values=\$$_opt" for _value in $_values; do echo "$_opt = $_value" >> "$CONF_FILE" @@ -163,7 +163,7 @@ print_lists_reduce() { local _values local _v shift - for _opt in $*; do + for _opt in "$@"; do _value= eval "_values=\$$_opt" for _v in $_values; do @@ -178,7 +178,7 @@ print_host_port() { local _opt local _host local _port - for _opt in $*; do + for _opt in "$@"; do eval "_host=\${${_opt}_host}" eval "_port=\${${_opt}_port}" [ -z "$_host" ] || [ -z "$_port" ] || echo "$_opt = $_host:$_port" >> "$CONF_FILE" @@ -190,7 +190,7 @@ print_optional_host_port() { local _host local _port local _value - for _opt in $*; do + for _opt in "$@"; do eval "_host=\${${_opt}_host}" eval "_port=\${${_opt}_port}" [ -z "$_port" ] || { @@ -376,10 +376,10 @@ service_triggers() { procd_add_reload_trigger stunnel procd_open_validate - validate_globals_section - validate_globals_section_service_options - validate_service_section - validate_service_section_service_options + validate_globals_section "$@" + validate_globals_section_service_options "$@" + validate_service_section "$@" + validate_service_section_service_options "$@" procd_close_validate } diff --git a/net/subversion/Makefile b/net/subversion/Makefile index 9cafc73dd..cbd7d71dd 100644 --- a/net/subversion/Makefile +++ b/net/subversion/Makefile @@ -6,14 +6,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=subversion -PKG_RELEASE:=1 -PKG_VERSION:=1.12.0 +PKG_RELEASE:=2 +PKG_VERSION:=1.12.2 PKG_SOURCE_URL:=@APACHE/subversion PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 -PKG_HASH:=7fae7c73d8a007c107c0ae5eb372bc0bb013dbfe966fcd5c59cd5a195a5e2edf +PKG_HASH:=3bd0b5c8e4c5175263dc9a92fd9aef94ce917e80af034f26fe5c45fde7e0f771 PKG_LICENSE:=Apache-2.0 PKG_LICENSE_FILES:=LICENSE PKG_MAINTAINER:=Val Kulkov <val.kulkov@gmail.com> +PKG_CPE_ID:=cpe:/a:apache:subversion PKG_FIXUP:=autoreconf PKG_MACRO_PATHS:=build/ac-macros @@ -35,11 +36,11 @@ define Package/subversion/Default endef define Package/subversion/Default/description - Subversion is a free/open-source version control system. That is, - Subversion manages files and directories, and the changes made to them, - over time. This allows you to recover older versions of your data, or - examine the history of how your data changed. In this regard, many - people think of a version control system as a sort of time machine. + Subversion is a free/open-source version control system. That is, + Subversion manages files and directories, and the changes made to them, + over time. This allows you to recover older versions of your data, or + examine the history of how your data changed. In this regard, many + people think of a version control system as a sort of time machine. 
endef define Package/subversion-libs diff --git a/net/travelmate/Makefile b/net/travelmate/Makefile index 1cf919996..17eb903b8 100644 --- a/net/travelmate/Makefile +++ b/net/travelmate/Makefile @@ -6,7 +6,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=travelmate -PKG_VERSION:=1.4.9 +PKG_VERSION:=1.4.11 PKG_RELEASE:=1 PKG_LICENSE:=GPL-3.0+ PKG_MAINTAINER:=Dirk Brenken <dev@brenken.org> @@ -17,7 +17,7 @@ define Package/travelmate SECTION:=net CATEGORY:=Network TITLE:=A wlan connection manager for travel router - DEPENDS:=+iwinfo +jshn +jsonfilter +uclient-fetch + DEPENDS:=+iwinfo +jshn +jsonfilter +uclient-fetch +dnsmasq PKGARCH:=all endef @@ -42,13 +42,16 @@ endef define Package/travelmate/install $(INSTALL_DIR) $(1)/usr/bin - $(INSTALL_BIN) ./files/travelmate.sh $(1)/usr/bin/ + $(INSTALL_BIN) ./files/travelmate.sh $(1)/usr/bin $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/travelmate.init $(1)/etc/init.d/travelmate $(INSTALL_DIR) $(1)/etc/config $(INSTALL_CONF) ./files/travelmate.conf $(1)/etc/config/travelmate + + $(INSTALL_DIR) $(1)/etc/travelmate + $(INSTALL_BIN) ./files/*.login $(1)/etc/travelmate endef $(eval $(call BuildPackage,travelmate)) diff --git a/net/travelmate/files/README.md b/net/travelmate/files/README.md index e261e7661..f84fffa73 100644 --- a/net/travelmate/files/README.md +++ b/net/travelmate/files/README.md @@ -13,6 +13,7 @@ To avoid these kind of deadlocks, travelmate will set all station interfaces to * support all kinds of uplinks, incl. hidden and enterprise uplinks * continuously checks the existing uplink connection (quality), e.g. for conditional uplink (dis-) connections * captive portal detection with internet online check and a 'heartbeat' function to keep the uplink connection up & running +* captive portal auto-login hook (configured via uci/LuCI), you could reference an external script for captive portal auto-logins (see example below) * proactively scan and switch to a higher prioritized uplink, despite of an already existing connection * support devices with multiple radios in any order * procd init and hotplug support @@ -21,10 +22,11 @@ To avoid these kind of deadlocks, travelmate will set all station interfaces to * optional: the LuCI frontend shows the WiFi QR codes from all configured Access Points. It allows you to connect your Android or iOS devices to your router’s WiFi using the QR code ## Prerequisites -* [OpenWrt](https://openwrt.org), tested with the stable release series (18.06.x) and with the latest OpenWrt snapshot -* iwinfo for wlan scanning, uclient-fetch for captive portal detection +* [OpenWrt](https://openwrt.org), tested with the stable release series (19.07.x) and with the latest OpenWrt snapshot +* iwinfo for wlan scanning, uclient-fetch for captive portal detection, dnsmasq as dns backend * optional: qrencode 4.x for QR code support * optional: wpad (the full version, not wpad-mini) to use Enterprise WiFi +* optional: curl to use external scripts for captive portal auto-logins ## Installation & Usage * download the package [here](https://downloads.openwrt.org/snapshots/packages/x86_64/packages) @@ -50,24 +52,38 @@ To avoid these kind of deadlocks, travelmate will set all station interfaces to * trm\_maxretry => how many times should travelmate try to connect to an uplink (int/default: '3', valid range: 1-10) * trm\_timeout => overall retry timeout in seconds (int/default: '60', valid range: 30-300) * trm\_radio => limit travelmate to a single radio (e.g. 'radio1') or change the overall scanning priority (e.g. 
'radio1 radio2 radio0') (default: not set, use all radios 0-n) - * trm\_iface => main uplink / procd trigger network interface (default: trm_wwan) + * trm\_iface => uplink / procd trigger network interface (default: trm_wwan) * trm\_triggerdelay => additional trigger delay in seconds before travelmate processing begins (int/default: '2') +## Captive Portal auto-logins +For automated captive portal logins you could reference external shell scripts. All login scripts should be executable and located in '/etc/travelmate' with the extension '.login'. The provided 'wifionice.login' script example requires curl and automates the login to German ICE hotspots; it also explains the general approach to extract runtime data like security tokens for a successful login. Hopefully more scripts for different captive portals will be provided by the community ... + +A typical successful captive portal login looks like this: +<pre><code> +[...] +Mon Aug 5 10:15:48 2019 user.info travelmate-1.4.10[1481]: travelmate instance started ::: action: start, pid: 1481 +Mon Aug 5 10:16:17 2019 user.info travelmate-1.4.10[1481]: captive portal login '/etc/travelmate/wifionice.login' for 'www.wifionice.de' has been executed with rc '0' +Mon Aug 5 10:16:23 2019 user.info travelmate-1.4.10[1481]: connected to uplink 'radio1/WIFIonICE/-' (1/5, GL.iNet GL-AR750S, OpenWrt SNAPSHOT r10644-cb49e46a8a) +[...] +</code></pre> + ## Runtime information **receive travelmate runtime information:** <pre><code> ~# /etc/init.d/travelmate status ::: travelmate runtime information - + travelmate_status : connected (net ok/78) - + travelmate_version : 1.2.3 - + station_id : radio1/blackhole/01:02:03:04:05:06 + + travelmate_status : connected (net ok/100) + + travelmate_version : 1.4.10 + + station_id : radio1/blackhole/- + station_interface : trm_wwan + faulty_stations : - + last_rundate : 07.09.2018 17:22:37 - + system : TP-LINK RE450, OpenWrt SNAPSHOT r8018-42f158314e + + last_rundate : 2019.08.03-20:37:19 + + system : GL.iNet GL-AR750S, OpenWrt SNAPSHOT r10644-cb49e46a8a </code></pre> +To debug travelmate runtime problems, please always enable the 'trm\_debug' flag, restart travelmate and scan the system log (_logread -e "travelmate"_). + ## Manual Setup **1. 
configure the travelmate wwan interface in /etc/config/network:** <pre><code> diff --git a/net/travelmate/files/travelmate.sh b/net/travelmate/files/travelmate.sh index acb83a0ad..1e00179a2 100755 --- a/net/travelmate/files/travelmate.sh +++ b/net/travelmate/files/travelmate.sh @@ -10,7 +10,7 @@ # LC_ALL=C PATH="/usr/sbin:/usr/bin:/sbin:/bin" -trm_ver="1.4.9" +trm_ver="1.4.11" trm_sysver="unknown" trm_enabled=0 trm_debug=0 @@ -65,10 +65,9 @@ f_envload() trm_sysver="${sys_model}, ${sys_desc}" fi - # get eap capabilities and rebind protection setting + # get eap capabilities # trm_eap="$("${trm_wpa}" -veap >/dev/null 2>&1; printf "%u" ${?})" - trm_rebind="$(uci_get dhcp "@dnsmasq[0]" rebind_protection)" # load config and check 'enabled' option # @@ -169,24 +168,24 @@ f_prep() fi fi fi - f_log "debug" "f_prep ::: config: ${config}, mode: ${mode}, network: ${network}, radio: ${radio}, trm_radio: ${trm_radio:-"-"}, trm_active_sta: ${trm_active_sta:-"-"}, proactive: ${proactive}, trm_eap: ${trm_eap:-"-"}, trm_rebind: ${trm_rebind:-"-"}, disabled: ${disabled}" + f_log "debug" "f_prep ::: config: ${config}, mode: ${mode}, network: ${network}, radio: ${radio}, trm_radio: ${trm_radio:-"-"}, trm_active_sta: ${trm_active_sta:-"-"}, proactive: ${proactive}, trm_eap: ${trm_eap:-"-"}, disabled: ${disabled}" } # check interface status # f_check() { - local IFS ifname radio dev_status last_status config sta_essid sta_bssid result cp_domain wait mode="${1}" status="${2:-"false"}" + local IFS ifname radio dev_status config sta_essid sta_bssid result uci_essid uci_bssid login_command bg_pid wait_time mode="${1}" status="${2:-"false"}" cp_domain="${3:-"false"}" if [ "${mode}" != "initial" ] && [ "${status}" = "false" ] then ubus call network reload - wait=$((trm_maxwait/6)) - sleep ${wait} + wait_time=$((trm_maxwait/6)) + sleep ${wait_time} fi - wait=1 - while [ "${wait}" -le "${trm_maxwait}" ] + wait_time=1 + while [ "${wait_time}" -le "${trm_maxwait}" ] do dev_status="$(ubus -S call network.wireless status 2>/dev/null)" if [ -n "${dev_status}" ] @@ -206,7 +205,7 @@ f_check() trm_devlist="$(f_trim "${trm_devlist} ${radio}")" fi done - if [ "${trm_devlist}" = "${trm_radiolist}" ] || [ "${wait}" -eq "${trm_maxwait}" ] + if [ "${trm_devlist}" = "${trm_radiolist}" ] || [ "${wait_time}" -eq "${trm_maxwait}" ] then ifname="${trm_devlist}" break @@ -224,21 +223,33 @@ f_check() if [ "${mode}" = "initial" ] && [ "${trm_captive}" -eq 1 ] then result="$(${trm_fetch} --timeout=$((trm_maxwait/6)) "${trm_captiveurl}" -O /dev/null 2>&1 | \ - awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|^Connection error/{printf "%s" "net nok";exit}')" + awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|Connection error/{printf "%s" "net nok";exit}')" + if [ "${cp_domain}" = "true" ] + then + cp_domain="$(printf "%s" "${result}" | awk -F "[\\'| ]" '/^net cp/{printf "%s" $4}')" + uci_essid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.ssid')" + uci_essid="$(printf "%s" "${uci_essid//[^[:alnum:]_]/_}" | awk '{print tolower($1)}')" + uci_bssid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.bssid')" + uci_bssid="${uci_bssid//[^[:alnum:]_]/_}" + fi fi - if [ "${trm_ifquality}" -ge "${trm_minquality}" ] && [ "${result%/*}" != "net nok" ] + if [ "${trm_ifquality}" -ge 
"${trm_minquality}" ] && [ "${result}" != "net nok" ] then trm_ifstatus="$(ubus -S call network.interface dump 2>/dev/null | jsonfilter -l1 -e "@.interface[@.device=\"${ifname}\"].up")" if [ "${trm_ifstatus}" = "true" ] then - if [ "${mode}" = "sta" ] && [ "${trm_captive}" -eq 1 ] && [ "${trm_rebind:-0}" -eq 1 ] && [ -x "/etc/init.d/dnsmasq" ] + if [ "${mode}" = "sta" ] && [ "${trm_captive}" -eq 1 ] then while true do result="$(${trm_fetch} --timeout=$((trm_maxwait/6)) "${trm_captiveurl}" -O /dev/null 2>&1 | \ - awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|^Connection error/{printf "%s" "net nok";exit}')" + awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|Connection error/{printf "%s" "net nok";exit}')" cp_domain="$(printf "%s" "${result}" | awk -F "[\\'| ]" '/^net cp/{printf "%s" $4}')" - if [ "${trm_netcheck}" -eq 1 ] && [ "${result%/*}" = "net nok" ] + uci_essid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.ssid')" + uci_essid="$(printf "%s" "${uci_essid//[^[:alnum:]_]/_}" | awk '{print tolower($1)}')" + uci_bssid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].config.bssid')" + uci_bssid="${uci_bssid//[^[:alnum:]_]/_}" + if [ "${trm_netcheck}" -eq 1 ] && [ "${result}" = "net nok" ] then trm_ifstatus="${status}" f_jsnup @@ -249,13 +260,40 @@ f_check() break fi uci -q add_list dhcp.@dnsmasq[0].rebind_domain="${cp_domain}" - f_log "info" "captive portal domain '${cp_domain}' added to rebind whitelist" + f_log "info" "captive portal domain '${cp_domain}' added to to dhcp rebind whitelist" + if [ -z "$(uci_get travelmate "${uci_essid}${uci_bssid}")" ] + then + uci_add travelmate "login" "${uci_essid}${uci_bssid}" + uci_set travelmate "${uci_essid}${uci_bssid}" "command" "none" + f_log "info" "captive portal login section '${uci_essid}${uci_bssid}' added to travelmate config section" + fi done if [ -n "$(uci -q changes dhcp)" ] then uci_commit dhcp /etc/init.d/dnsmasq reload fi + if [ -n "$(uci -q changes travelmate)" ] + then + uci_commit travelmate + fi + fi + if [ -n "${cp_domain}" ] && [ "${cp_domain}" != "false" ] && [ -n "${uci_essid}" ] && [ "${trm_captive}" -eq 1 ] + then + trm_connection="${result:-"-"}/${trm_ifquality}" + f_jsnup + login_command="$(uci_get travelmate "${uci_essid}${uci_bssid}" command)" + if [ -x "${login_command}" ] + then + "${login_command}" >/dev/null 2>&1 + rc=${?} + f_log "info" "captive portal login '${login_command:0:40}' for '${cp_domain}' has been executed with rc '${rc}'" + if [ "${rc}" -eq 0 ] + then + result="$(${trm_fetch} --timeout=$((trm_maxwait/6)) "${trm_captiveurl}" -O /dev/null 2>&1 | \ + awk '/^Failed to redirect|^Redirected/{printf "%s" "net cp \047"$NF"\047";exit}/^Download completed/{printf "%s" "net ok";exit}/^Failed|Connection error/{printf "%s" "net nok";exit}')" + fi + fi fi trm_connection="${result:-"-"}/${trm_ifquality}" f_jsnup @@ -267,13 +305,15 @@ f_check() sta_bssid="$(printf "%s" "${dev_status}" | jsonfilter -l1 -e '@.*.interfaces[@.config.mode="sta"].*.bssid')" if [ "${trm_ifquality}" -lt "${trm_minquality}" ] then + unset trm_connection + trm_ifstatus="${status}" f_log "info" "uplink '${sta_essid:-"-"}/${sta_bssid:-"-"}' is out of range (${trm_ifquality}/${trm_minquality})" - elif [ "${trm_netcheck}" -eq 1 ] && [ "${result%/*}" = "net nok" ] + elif [ 
"${trm_netcheck}" -eq 1 ] && [ "${result}" = "net nok" ] then + unset trm_connection + trm_ifstatus="${status}" f_log "info" "uplink '${sta_essid:-"-"}/${sta_bssid:-"-"}' has no internet (${result})" fi - unset trm_connection - trm_ifstatus="${status}" f_jsnup break elif [ "${mode}" = "initial" ] @@ -294,10 +334,10 @@ f_check() fi fi fi - wait=$((wait+1)) + wait_time=$((wait_time+1)) sleep 1 done - f_log "debug" "f_check::: mode: ${mode}, name: ${ifname:-"-"}, status: ${trm_ifstatus}, connection: ${trm_connection:-"-"}, wait: ${wait}, max_wait: ${trm_maxwait}, min_quality: ${trm_minquality}, captive: ${trm_captive}, netcheck: ${trm_netcheck}" + f_log "debug" "f_check::: mode: ${mode}, name: ${ifname:-"-"}, status: ${trm_ifstatus}, connection: ${trm_connection:-"-"}, wait: ${wait_time}, max_wait: ${trm_maxwait}, min_quality: ${trm_minquality}, captive: ${trm_captive}, netcheck: ${trm_netcheck}" } # update runtime information @@ -399,7 +439,7 @@ f_main() local IFS cnt dev config spec scan_list scan_essid scan_bssid scan_quality faulty_list local station_id sta sta_essid sta_bssid sta_radio sta_iface active_essid active_bssid active_radio - f_check "initial" + f_check "initial" "false" "true" f_log "debug" "f_main ::: status: ${trm_ifstatus}, proactive: ${trm_proactive}" if [ "${trm_ifstatus}" != "true" ] || [ "${trm_proactive}" -eq 1 ] then diff --git a/net/travelmate/files/wifionice.login b/net/travelmate/files/wifionice.login new file mode 100755 index 000000000..5687921d9 --- /dev/null +++ b/net/travelmate/files/wifionice.login @@ -0,0 +1,37 @@ +#!/bin/sh +# captive portal auto-login script for german ICE hotspots +# written by Dirk Brenken (dev@brenken.org) + +# This is free software, licensed under the GNU General Public License v3. +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +domain="www.wifionice.de" +cmd="$(command -v curl)" + +# curl check +# +if [ ! 
-x "${cmd}" ] +then + exit 1 +fi + +# initial get request to receive & extract a valid security token +# +"${cmd}" "http://${domain}/en/" -s -o /dev/null -c "/tmp/${domain}.cookie" +if [ -f "/tmp/${domain}.cookie" ] +then + sec_token="$(awk '/csrf/{print $7}' "/tmp/${domain}.cookie")" + rm -f "/tmp/${domain}.cookie" +else + exit 2 +fi + +# final post request/login with valid session cookie/security token +# +if [ -n "${sec_token}" ] +then + "${cmd}" "http://${domain}/en/" -H "Cookie: csrf=${sec_token}" --data "login=true&CSRFToken=${sec_token}&connect=" +else + exit 3 +fi diff --git a/net/ulogd/Makefile b/net/ulogd/Makefile index 201f05f25..a6fee7a73 100644 --- a/net/ulogd/Makefile +++ b/net/ulogd/Makefile @@ -9,26 +9,33 @@ include $(TOPDIR)/rules.mk PKG_NAME:=ulogd PKG_VERSION:=2.0.7 -PKG_RELEASE:=1 +PKG_RELEASE:=3 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 PKG_SOURCE_URL:=https://netfilter.org/projects/ulogd/files/ \ ftp://ftp.netfilter.org/pub/ulogd/ PKG_HASH:=990a05494d9c16029ba0a83f3b7294fc05c756546b8d60d1c1572dc25249a92b -PKG_LICENSE:=GPL-2.0 +PKG_MAINTAINER:=Alexandru Ardelean <ardeleanalex@gmail.com> +PKG_LICENSE:=GPL-2.0-only PKG_LICENSE_FILES:=COPYING -PKG_MAINTAINER:=Nicolas Thill <nico@openwrt.org> -PKG_FIXUP:=autoreconf PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 + +PKG_BUILD_DEPENDS:=libnetfilter-acct libnetfilter-conntrack libnetfilter-log +PKG_CONFIG_DEPENDS:= \ + CONFIG_PACKAGE_ulogd-mod-dbi \ + CONFIG_PACKAGE_ulogd-mod-mysql \ + CONFIG_PACKAGE_ulogd-mod-pgsql \ + CONFIG_PACKAGE_ulogd-mod-sqlite include $(INCLUDE_DIR)/package.mk define Package/ulogd/Default SECTION:=net CATEGORY:=Network - URL:=http://www.netfilter.org/projects/ulogd/index.html + URL:=https://www.netfilter.org/projects/ulogd/index.html endef define Package/ulogd @@ -114,21 +121,10 @@ define Package/ulogd-mod-extra TITLE:=Extra plugins endef -PKG_BUILD_DEPENDS:=libnetfilter-acct libnetfilter-conntrack libnetfilter-log - -PKG_CONFIG_DEPENDS:= \ - CONFIG_PACKAGE_ulogd-mod-dbi \ - CONFIG_PACKAGE_ulogd-mod-mysql \ - CONFIG_PACKAGE_ulogd-mod-pgsql \ - CONFIG_PACKAGE_ulogd-mod-sqlite \ - -TARGET_CFLAGS += \ - -D_GNU_SOURCE \ - CONFIGURE_ARGS += \ --enable-nfacct \ --enable-nfct \ - --enable-nflog \ + --enable-nflog ifneq ($(DEVELOPER)$(SDK)$(CONFIG_PACKAGE_ulogd-mod-dbi),) CONFIGURE_ARGS += --with-dbi \ diff --git a/net/ulogd/patches/010-json-remote.patch b/net/ulogd/patches/010-json-remote.patch new file mode 100644 index 000000000..a250e0631 --- /dev/null +++ b/net/ulogd/patches/010-json-remote.patch @@ -0,0 +1,441 @@ +From 9d9ea2cd70a369a7f665a322e6c53631e01a2570 Mon Sep 17 00:00:00 2001 +From: Andreas Jaggi <andreas.jaggi@waterwave.ch> +Date: Wed, 30 May 2018 22:15:36 +0200 +Subject: ulogd: json: send messages to a remote host / unix socket + +Extend the JSON output plugin so that the generated JSON stream can be +sent to a remote host via TCP/UDP or to a local unix socket. 
+ +Signed-off-by: Andreas Jaggi <andreas.jaggi@waterwave.ch> +Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org> +--- + output/ulogd_output_JSON.c | 291 +++++++++++++++++++++++++++++++++++++++++---- + ulogd.conf.in | 11 ++ + 2 files changed, 281 insertions(+), 21 deletions(-) + +diff --git a/output/ulogd_output_JSON.c b/output/ulogd_output_JSON.c +index 4d8e3e9..6edfa90 100644 +--- a/output/ulogd_output_JSON.c ++++ b/output/ulogd_output_JSON.c +@@ -20,10 +20,15 @@ + + #include <stdio.h> + #include <stdlib.h> ++#include <unistd.h> + #include <string.h> + #include <time.h> + #include <errno.h> + #include <inttypes.h> ++#include <sys/types.h> ++#include <sys/socket.h> ++#include <sys/un.h> ++#include <netdb.h> + #include <ulogd/ulogd.h> + #include <ulogd/conffile.h> + #include <jansson.h> +@@ -36,6 +41,10 @@ + #define ULOGD_JSON_DEFAULT_DEVICE "Netfilter" + #endif + ++#define host_ce(x) (x->ces[JSON_CONF_HOST]) ++#define port_ce(x) (x->ces[JSON_CONF_PORT]) ++#define mode_ce(x) (x->ces[JSON_CONF_MODE]) ++#define file_ce(x) (x->ces[JSON_CONF_FILENAME]) + #define unlikely(x) __builtin_expect((x),0) + + struct json_priv { +@@ -44,6 +53,15 @@ struct json_priv { + int usec_idx; + long cached_gmtoff; + char cached_tz[6]; /* eg +0200 */ ++ int mode; ++ int sock; ++}; ++ ++enum json_mode { ++ JSON_MODE_FILE = 0, ++ JSON_MODE_TCP, ++ JSON_MODE_UDP, ++ JSON_MODE_UNIX + }; + + enum json_conf { +@@ -53,6 +71,9 @@ enum json_conf { + JSON_CONF_EVENTV1, + JSON_CONF_DEVICE, + JSON_CONF_BOOLEAN_LABEL, ++ JSON_CONF_MODE, ++ JSON_CONF_HOST, ++ JSON_CONF_PORT, + JSON_CONF_MAX + }; + +@@ -95,15 +116,167 @@ static struct config_keyset json_kset = { + .options = CONFIG_OPT_NONE, + .u = { .value = 0 }, + }, ++ [JSON_CONF_MODE] = { ++ .key = "mode", ++ .type = CONFIG_TYPE_STRING, ++ .options = CONFIG_OPT_NONE, ++ .u = { .string = "file" }, ++ }, ++ [JSON_CONF_HOST] = { ++ .key = "host", ++ .type = CONFIG_TYPE_STRING, ++ .options = CONFIG_OPT_NONE, ++ .u = { .string = "127.0.0.1" }, ++ }, ++ [JSON_CONF_PORT] = { ++ .key = "port", ++ .type = CONFIG_TYPE_STRING, ++ .options = CONFIG_OPT_NONE, ++ .u = { .string = "12345" }, ++ }, + }, + }; + ++static void close_socket(struct json_priv *op) { ++ if (op->sock != -1) { ++ close(op->sock); ++ op->sock = -1; ++ } ++} ++ ++static int _connect_socket_unix(struct ulogd_pluginstance *pi) ++{ ++ struct json_priv *op = (struct json_priv *) &pi->private; ++ struct sockaddr_un u_addr; ++ int sfd; ++ ++ close_socket(op); ++ ++ ulogd_log(ULOGD_DEBUG, "connecting to unix:%s\n", ++ file_ce(pi->config_kset).u.string); ++ ++ sfd = socket(AF_UNIX, SOCK_STREAM, 0); ++ if (sfd == -1) { ++ return -1; ++ } ++ u_addr.sun_family = AF_UNIX; ++ strncpy(u_addr.sun_path, file_ce(pi->config_kset).u.string, ++ sizeof(u_addr.sun_path) - 1); ++ if (connect(sfd, (struct sockaddr *) &u_addr, sizeof(struct sockaddr_un)) == -1) { ++ close(sfd); ++ return -1; ++ } ++ ++ op->sock = sfd; ++ ++ return 0; ++} ++ ++static int _connect_socket_net(struct ulogd_pluginstance *pi) ++{ ++ struct json_priv *op = (struct json_priv *) &pi->private; ++ struct addrinfo hints; ++ struct addrinfo *result, *rp; ++ int sfd, s; ++ ++ close_socket(op); ++ ++ ulogd_log(ULOGD_DEBUG, "connecting to %s:%s\n", ++ host_ce(pi->config_kset).u.string, ++ port_ce(pi->config_kset).u.string); ++ ++ memset(&hints, 0, sizeof(struct addrinfo)); ++ hints.ai_family = AF_UNSPEC; ++ hints.ai_socktype = op->mode == JSON_MODE_UDP ? 
SOCK_DGRAM : SOCK_STREAM; ++ hints.ai_protocol = 0; ++ hints.ai_flags = 0; ++ ++ s = getaddrinfo(host_ce(pi->config_kset).u.string, ++ port_ce(pi->config_kset).u.string, &hints, &result); ++ if (s != 0) { ++ ulogd_log(ULOGD_ERROR, "getaddrinfo: %s\n", gai_strerror(s)); ++ return -1; ++ } ++ ++ for (rp = result; rp != NULL; rp = rp->ai_next) { ++ int on = 1; ++ ++ sfd = socket(rp->ai_family, rp->ai_socktype, ++ rp->ai_protocol); ++ if (sfd == -1) ++ continue; ++ ++ setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR, ++ (char *) &on, sizeof(on)); ++ ++ if (connect(sfd, rp->ai_addr, rp->ai_addrlen) != -1) ++ break; ++ ++ close(sfd); ++ } ++ ++ freeaddrinfo(result); ++ ++ if (rp == NULL) { ++ return -1; ++ } ++ ++ op->sock = sfd; ++ ++ return 0; ++} ++ ++static int _connect_socket(struct ulogd_pluginstance *pi) ++{ ++ struct json_priv *op = (struct json_priv *) &pi->private; ++ ++ if (op->mode == JSON_MODE_UNIX) ++ return _connect_socket_unix(pi); ++ else ++ return _connect_socket_net(pi); ++} ++ ++static int json_interp_socket(struct ulogd_pluginstance *upi, char *buf, int buflen) ++{ ++ struct json_priv *opi = (struct json_priv *) &upi->private; ++ int ret = 0; ++ ++ if (opi->sock != -1) ++ ret = send(opi->sock, buf, buflen, MSG_NOSIGNAL); ++ free(buf); ++ if (ret != buflen) { ++ ulogd_log(ULOGD_ERROR, "Failure sending message: %s\n", ++ strerror(errno)); ++ if (ret == -1 || opi->sock == -1) ++ return _connect_socket(upi); ++ else ++ return ULOGD_IRET_ERR; ++ } ++ ++ return ULOGD_IRET_OK; ++} ++ ++static int json_interp_file(struct ulogd_pluginstance *upi, char *buf) ++{ ++ struct json_priv *opi = (struct json_priv *) &upi->private; ++ ++ fprintf(opi->of, "%s", buf); ++ free(buf); ++ ++ if (upi->config_kset->ces[JSON_CONF_SYNC].u.value != 0) ++ fflush(opi->of); ++ ++ return ULOGD_IRET_OK; ++} ++ + #define MAX_LOCAL_TIME_STRING 38 + + static int json_interp(struct ulogd_pluginstance *upi) + { + struct json_priv *opi = (struct json_priv *) &upi->private; + unsigned int i; ++ char *buf; ++ int buflen; + json_t *msg; + + msg = json_object(); +@@ -218,34 +391,65 @@ static int json_interp(struct ulogd_pluginstance *upi) + } + } + +- json_dumpf(msg, opi->of, 0); +- fprintf(opi->of, "\n"); + ++ buf = json_dumps(msg, 0); + json_decref(msg); ++ if (buf == NULL) { ++ ulogd_log(ULOGD_ERROR, "Could not create message\n"); ++ return ULOGD_IRET_ERR; ++ } ++ buflen = strlen(buf); ++ buf = realloc(buf, sizeof(char)*(buflen+2)); ++ if (buf == NULL) { ++ ulogd_log(ULOGD_ERROR, "Could not create message\n"); ++ return ULOGD_IRET_ERR; ++ } ++ strncat(buf, "\n", 1); ++ buflen++; + +- if (upi->config_kset->ces[JSON_CONF_SYNC].u.value != 0) +- fflush(opi->of); ++ if (opi->mode == JSON_MODE_FILE) ++ return json_interp_file(upi, buf); ++ else ++ return json_interp_socket(upi, buf, buflen); ++} + +- return ULOGD_IRET_OK; ++static void reopen_file(struct ulogd_pluginstance *upi) ++{ ++ struct json_priv *oi = (struct json_priv *) &upi->private; ++ FILE *old = oi->of; ++ ++ ulogd_log(ULOGD_NOTICE, "JSON: reopening logfile\n"); ++ oi->of = fopen(upi->config_kset->ces[0].u.string, "a"); ++ if (!oi->of) { ++ ulogd_log(ULOGD_ERROR, "can't open JSON " ++ "log file: %s\n", ++ strerror(errno)); ++ oi->of = old; ++ } else { ++ fclose(old); ++ } ++} ++ ++static void reopen_socket(struct ulogd_pluginstance *upi) ++{ ++ ulogd_log(ULOGD_NOTICE, "JSON: reopening socket\n"); ++ if (_connect_socket(upi) < 0) { ++ ulogd_log(ULOGD_ERROR, "can't open JSON " ++ "socket: %s\n", ++ strerror(errno)); ++ } + } + + static void sighup_handler_print(struct 
ulogd_pluginstance *upi, int signal) + { + struct json_priv *oi = (struct json_priv *) &upi->private; +- FILE *old = oi->of; + + switch (signal) { + case SIGHUP: +- ulogd_log(ULOGD_NOTICE, "JSON: reopening logfile\n"); +- oi->of = fopen(upi->config_kset->ces[0].u.string, "a"); +- if (!oi->of) { +- ulogd_log(ULOGD_ERROR, "can't open JSON " +- "log file: %s\n", +- strerror(errno)); +- oi->of = old; +- } else { +- fclose(old); +- } ++ if (oi->mode == JSON_MODE_FILE) ++ reopen_file(upi); ++ else ++ reopen_socket(upi); + break; + default: + break; +@@ -255,6 +459,8 @@ static void sighup_handler_print(struct ulogd_pluginstance *upi, int signal) + static int json_configure(struct ulogd_pluginstance *upi, + struct ulogd_pluginstance_stack *stack) + { ++ struct json_priv *op = (struct json_priv *) &upi->private; ++ char *mode_str = mode_ce(upi->config_kset).u.string; + int ret; + + ret = ulogd_wildcard_inputkeys(upi); +@@ -265,13 +471,25 @@ static int json_configure(struct ulogd_pluginstance *upi, + if (ret < 0) + return ret; + ++ if (!strcasecmp(mode_str, "udp")) { ++ op->mode = JSON_MODE_UDP; ++ } else if (!strcasecmp(mode_str, "tcp")) { ++ op->mode = JSON_MODE_TCP; ++ } else if (!strcasecmp(mode_str, "unix")) { ++ op->mode = JSON_MODE_UNIX; ++ } else if (!strcasecmp(mode_str, "file")) { ++ op->mode = JSON_MODE_FILE; ++ } else { ++ ulogd_log(ULOGD_ERROR, "unknown mode '%s'\n", mode_str); ++ return -EINVAL; ++ } ++ + return 0; + } + +-static int json_init(struct ulogd_pluginstance *upi) ++static int json_init_file(struct ulogd_pluginstance *upi) + { + struct json_priv *op = (struct json_priv *) &upi->private; +- unsigned int i; + + op->of = fopen(upi->config_kset->ces[0].u.string, "a"); + if (!op->of) { +@@ -280,6 +498,27 @@ static int json_init(struct ulogd_pluginstance *upi) + return -1; + } + ++ return 0; ++} ++ ++static int json_init_socket(struct ulogd_pluginstance *upi) ++{ ++ struct json_priv *op = (struct json_priv *) &upi->private; ++ ++ if (host_ce(upi->config_kset).u.string == NULL) ++ return -1; ++ if (port_ce(upi->config_kset).u.string == NULL) ++ return -1; ++ ++ op->sock = -1; ++ return _connect_socket(upi); ++} ++ ++static int json_init(struct ulogd_pluginstance *upi) ++{ ++ struct json_priv *op = (struct json_priv *) &upi->private; ++ unsigned int i; ++ + /* search for time */ + op->sec_idx = -1; + op->usec_idx = -1; +@@ -293,15 +532,25 @@ static int json_init(struct ulogd_pluginstance *upi) + + *op->cached_tz = '\0'; + +- return 0; ++ if (op->mode == JSON_MODE_FILE) ++ return json_init_file(upi); ++ else ++ return json_init_socket(upi); ++} ++ ++static void close_file(FILE *of) { ++ if (of != stdout) ++ fclose(of); + } + + static int json_fini(struct ulogd_pluginstance *pi) + { + struct json_priv *op = (struct json_priv *) &pi->private; + +- if (op->of != stdout) +- fclose(op->of); ++ if (op->mode == JSON_MODE_FILE) ++ close_file(op->of); ++ else ++ close_socket(op); + + return 0; + } +diff --git a/ulogd.conf.in b/ulogd.conf.in +index 62222db..99cfc24 100644 +--- a/ulogd.conf.in ++++ b/ulogd.conf.in +@@ -213,6 +213,17 @@ sync=1 + # Uncomment the following line to use JSON v1 event format that + # can provide better compatility with some JSON file reader. 
+ #eventv1=1 ++# Uncomment the following lines to send the JSON logs to a remote host via UDP ++#mode="udp" ++#host="192.0.2.10" ++#port="10210" ++# Uncomment the following lines to send the JSON logs to a remote host via TCP ++#mode="tcp" ++#host="192.0.2.10" ++#port="10210" ++# Uncomment the following lines to send the JSON logs to a local unix socket ++#mode="unix" ++#file="/var/run/ulogd.socket" + + [pcap1] + #default file is /var/log/ulogd.pcap +-- +cgit v1.2.1 + diff --git a/net/xl2tpd/Makefile b/net/xl2tpd/Makefile index d8a97a226..678892cf2 100644 --- a/net/xl2tpd/Makefile +++ b/net/xl2tpd/Makefile @@ -8,7 +8,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=xl2tpd -PKG_VERSION:=1.3.13 +PKG_VERSION:=1.3.14 PKG_RELEASE:=1 PKG_MAINTAINER:=Yousong Zhou <yszhou4tech@gmail.com> PKG_LICENSE:=GPL-2.0 @@ -19,7 +19,7 @@ PKG_SOURCE_URL:=https://github.com/xelerance/xl2tpd.git PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) PKG_SOURCE_VERSION:=v$(PKG_VERSION) PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_MIRROR_HASH:=dbff24b8720063b510e711091459d6928d0bef332ead838aa63ed179ca02219e +PKG_MIRROR_HASH:=4a35bb75bdd05964b1438771483a79a52eed2e30d0bc85e7481bf951d3bc0b96 PKG_BUILD_DEPENDS:=libpcap diff --git a/net/yggdrasil/Makefile b/net/yggdrasil/Makefile index 9c35acec2..dfa0459df 100644 --- a/net/yggdrasil/Makefile +++ b/net/yggdrasil/Makefile @@ -1,16 +1,17 @@ include $(TOPDIR)/rules.mk PKG_NAME:=yggdrasil -PKG_VERSION:=0.3.5 -PKG_RELEASE:=4 +PKG_VERSION:=0.3.6 +PKG_RELEASE:=1 -PKG_SOURCE_URL:=https://codeload.github.com/yggdrasil-network/yggdrasil-go/tar.gz/v$(PKG_VERSION)? PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_HASH:=2c69029adeb053ad049e90f1e4b7efa986094779868da77464d3c869984e861b +PKG_SOURCE_URL:=https://codeload.github.com/yggdrasil-network/yggdrasil-go/tar.gz/v$(PKG_VERSION)? +PKG_HASH:=dc1699064319f19a64ac57bac366a15d718008fdb75ef03bf4252d3552dff4eb PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-go-$(PKG_VERSION) -PKG_LICENSE:=GPL-3.0 PKG_MAINTAINER:=William Fleurant <meshnet@protonmail.com> +PKG_LICENSE:=LGPL-3.0-only +PKG_LICENSE_FILES:=LICENSE PKG_BUILD_DEPENDS:=golang/host PKG_BUILD_PARALLEL:=1 diff --git a/net/yggdrasil/files/yggdrasil.init b/net/yggdrasil/files/yggdrasil.init index afdcdb433..6ad3f6583 100755 --- a/net/yggdrasil/files/yggdrasil.init +++ b/net/yggdrasil/files/yggdrasil.init @@ -11,8 +11,10 @@ start_service() procd_open_instance procd_set_param respawn - procd_set_param command /bin/ash -c "/usr/sbin/yggdrasil -useconffile /etc/yggdrasil.conf | logger -t yggdrasil" + procd_set_param command /usr/sbin/yggdrasil -useconffile /etc/yggdrasil.conf + procd_set_param stdout 1 + procd_set_param stderr 1 procd_close_instance } diff --git a/net/zerotier/Makefile b/net/zerotier/Makefile index 7cf5a44cb..4a69d5e6b 100644 --- a/net/zerotier/Makefile +++ b/net/zerotier/Makefile @@ -6,12 +6,12 @@ include $(TOPDIR)/rules.mk PKG_NAME:=zerotier -PKG_VERSION:=1.2.12 -PKG_RELEASE:=4 +PKG_VERSION:=1.4.2 +PKG_RELEASE:=1 -PKG_SOURCE_URL:=https://codeload.github.com/zerotier/ZeroTierOne/tar.gz/$(PKG_VERSION)? PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_HASH:=212799bfaeb5e7dff20f2cd83f15742c8e13b8e9535606cfb85abcfb5fb6fed4 +PKG_SOURCE_URL:=https://codeload.github.com/zerotier/ZeroTierOne/tar.gz/$(PKG_VERSION)? 
+PKG_HASH:=557a444127812384265ec97232bae43dce1d4b1545ddd72e2b1646c971dad7c5 PKG_BUILD_DIR:=$(BUILD_DIR)/ZeroTierOne-$(PKG_VERSION) PKG_MAINTAINER:=Moritz Warning <moritzwarning@web.de> @@ -56,6 +56,10 @@ endef TARGET_CFLAGS += -ffunction-sections -fdata-sections TARGET_LDFLAGS += -Wl,--gc-sections +ifdef CONFIG_USE_UCLIBC + TARGET_CFLAGS += -D'valloc(a)=aligned_alloc(getpagesize(),a)' +endif + define Package/zerotier/conffiles /etc/config/zerotier endef diff --git a/net/zerotier/patches/0001-find-miniupnpc.h-in-staging-directory.patch b/net/zerotier/patches/0001-find-miniupnpc.h-in-staging-directory.patch index 5ea4d697e..7aa9c7ead 100644 --- a/net/zerotier/patches/0001-find-miniupnpc.h-in-staging-directory.patch +++ b/net/zerotier/patches/0001-find-miniupnpc.h-in-staging-directory.patch @@ -1,4 +1,4 @@ -From c578216351a4daa3916265b39b14f7c23ef15c90 Mon Sep 17 00:00:00 2001 +From 14454285d7ef5b9cd134c86059933036c1aa2fef Mon Sep 17 00:00:00 2001 From: Moritz Warning <moritzwarning@web.de> Date: Mon, 23 Apr 2018 22:12:31 +0200 Subject: [PATCH 1/4] find miniupnpc.h in staging directory @@ -8,10 +8,10 @@ Subject: [PATCH 1/4] find miniupnpc.h in staging directory 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/make-linux.mk b/make-linux.mk -index 2e6a8632..0cd955d1 100644 +index b81c7aeb..a547125d 100644 --- a/make-linux.mk +++ b/make-linux.mk -@@ -22,8 +22,8 @@ ONE_OBJS+=osdep/LinuxEthernetTap.o +@@ -29,8 +29,8 @@ TIMESTAMP=$(shell date +"%Y%m%d%H%M") # otherwise build into binary as done on Mac and Windows. ONE_OBJS+=osdep/PortMapper.o override DEFS+=-DZT_USE_MINIUPNPC @@ -23,5 +23,5 @@ index 2e6a8632..0cd955d1 100644 override DEFS+=-DZT_USE_SYSTEM_MINIUPNPC LDLIBS+=-lminiupnpc -- -2.17.0 +2.22.0 diff --git a/net/zerotier/patches/0002-remove-pie.patch b/net/zerotier/patches/0002-remove-pie.patch index 849cb20f9..8f90d8f0a 100644 --- a/net/zerotier/patches/0002-remove-pie.patch +++ b/net/zerotier/patches/0002-remove-pie.patch @@ -1,4 +1,4 @@ -From 7cfe751128d412a9b780ba5e4cb11908fc71cd3d Mon Sep 17 00:00:00 2001 +From 68fe97ef6b05e3709cd4b67c7681dcfc63bfaf80 Mon Sep 17 00:00:00 2001 From: Moritz Warning <moritzwarning@web.de> Date: Mon, 30 Apr 2018 16:14:30 +0200 Subject: [PATCH 2/4] remove -pie @@ -10,10 +10,10 @@ when making a shared object; recompile with -fPIC" error 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/make-linux.mk b/make-linux.mk -index 0cd955d1..add1d3ae 100644 +index a547125d..13244741 100644 --- a/make-linux.mk +++ b/make-linux.mk -@@ -63,11 +63,11 @@ ifeq ($(ZT_DEBUG),1) +@@ -77,11 +77,11 @@ ifeq ($(ZT_DEBUG),1) # C25519 in particular is almost UNUSABLE in -O0 even on a 3ghz box! 
node/Salsa20.o node/SHA512.o node/C25519.o node/Poly1305.o: CXXFLAGS=-Wall -O2 -g -pthread $(INCLUDES) $(DEFS) else @@ -29,5 +29,5 @@ index 0cd955d1..add1d3ae 100644 STRIP+=--strip-all endif -- -2.17.0 +2.22.0 diff --git a/net/zerotier/patches/0003-remove-arm32-conservative-CFLAGS.patch b/net/zerotier/patches/0003-remove-arm32-conservative-CFLAGS.patch index ea74d0d8c..f2189b97f 100644 --- a/net/zerotier/patches/0003-remove-arm32-conservative-CFLAGS.patch +++ b/net/zerotier/patches/0003-remove-arm32-conservative-CFLAGS.patch @@ -1,6 +1,17 @@ +From a856855ab97e0775a08e1571a4ad26c264cb13f4 Mon Sep 17 00:00:00 2001 +From: Moritz Warning <moritzwarning@web.de> +Date: Sun, 4 Aug 2019 03:56:37 +0200 +Subject: [PATCH 3/4] remove arm32 conservative CFLAGS + +--- + make-linux.mk | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/make-linux.mk b/make-linux.mk +index 13244741..fd164dfa 100644 --- a/make-linux.mk +++ b/make-linux.mk -@@ -231,7 +231,7 @@ ifeq ($(ZT_OFFICIAL),1) +@@ -262,7 +262,7 @@ ifeq ($(ZT_OFFICIAL),1) endif # ARM32 hell -- use conservative CFLAGS @@ -9,3 +20,6 @@ ifeq ($(shell if [ -e /usr/bin/dpkg ]; then dpkg --print-architecture; fi),armel) override CFLAGS+=-march=armv5 -mfloat-abi=soft -msoft-float -mno-unaligned-access -marm override CXXFLAGS+=-march=armv5 -mfloat-abi=soft -msoft-float -mno-unaligned-access -marm +-- +2.22.0 + diff --git a/net/zerotier/patches/0004-accept-external-linker-flags.patch b/net/zerotier/patches/0004-accept-external-linker-flags.patch index d50143a61..a28f4d826 100644 --- a/net/zerotier/patches/0004-accept-external-linker-flags.patch +++ b/net/zerotier/patches/0004-accept-external-linker-flags.patch @@ -1,4 +1,4 @@ -From a2cf8bf645d25f18cbc2ed7ad4b9a25725811afd Mon Sep 17 00:00:00 2001 +From 5169e5328525af28f6b7de087ece10a9bc0a2282 Mon Sep 17 00:00:00 2001 From: Moritz Warning <moritzwarning@web.de> Date: Wed, 2 May 2018 16:06:46 +0200 Subject: [PATCH 4/4] accept external linker flags @@ -8,10 +8,10 @@ Subject: [PATCH 4/4] accept external linker flags 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/make-linux.mk b/make-linux.mk -index 49e14f70..8e766bfb 100644 +index fd164dfa..29ff8813 100644 --- a/make-linux.mk +++ b/make-linux.mk -@@ -67,7 +67,7 @@ else +@@ -81,7 +81,7 @@ else override CFLAGS+=-Wall -Wno-deprecated -pthread $(INCLUDES) -DNDEBUG $(DEFS) CXXFLAGS?=-O3 -fstack-protector override CXXFLAGS+=-Wall -Wno-deprecated -std=c++11 -pthread $(INCLUDES) -DNDEBUG $(DEFS) @@ -21,5 +21,5 @@ index 49e14f70..8e766bfb 100644 STRIP+=--strip-all endif -- -2.17.0 +2.22.0 diff --git a/net/zerotier/patches/0005-link-natpmp.patch b/net/zerotier/patches/0005-link-natpmp.patch new file mode 100644 index 000000000..d1c820947 --- /dev/null +++ b/net/zerotier/patches/0005-link-natpmp.patch @@ -0,0 +1,11 @@ +--- a/make-linux.mk ++++ b/make-linux.mk +@@ -38,7 +38,7 @@ else + override DEFS+=-DMINIUPNP_STATICLIB -DMINIUPNPC_SET_SOCKET_TIMEOUT -DMINIUPNPC_GET_SRC_ADDR -D_BSD_SOURCE -D_DEFAULT_SOURCE -D_XOPEN_SOURCE=600 -DOS_STRING=\"Linux\" -DMINIUPNPC_VERSION_STRING=\"2.0\" -DUPNP_VERSION_STRING=\"UPnP/1.1\" -DENABLE_STRNATPMPERR + ONE_OBJS+=ext/miniupnpc/connecthostport.o ext/miniupnpc/igd_desc_parse.o ext/miniupnpc/minisoap.o ext/miniupnpc/minissdpc.o ext/miniupnpc/miniupnpc.o ext/miniupnpc/miniwget.o ext/miniupnpc/minixml.o ext/miniupnpc/portlistingparse.o ext/miniupnpc/receivedata.o ext/miniupnpc/upnpcommands.o ext/miniupnpc/upnpdev.o ext/miniupnpc/upnperrors.o ext/miniupnpc/upnpreplyparse.o + endif +-ifeq ($(wildcard 
/usr/include/natpmp.h),) ++ifeq ($(wildcard $(STAGING_DIR)/usr/include/natpmp.h),) + ONE_OBJS+=ext/libnatpmp/natpmp.o ext/libnatpmp/getgateway.o + else + LDLIBS+=-lnatpmp diff --git a/sound/mpd/Makefile b/sound/mpd/Makefile index cf3d291bd..609f3a375 100644 --- a/sound/mpd/Makefile +++ b/sound/mpd/Makefile @@ -7,14 +7,14 @@ include $(TOPDIR)/rules.mk PKG_NAME:=mpd PKG_VERSION:=0.20.23 -PKG_RELEASE:=2 +PKG_RELEASE:=3 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=http://www.musicpd.org/download/mpd/0.20/ PKG_HASH:=503e5f9f237290f568ff7956ab2f9aed563594bf749f19b8fe994fb21434afea -PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net> -PKG_LICENSE:=GPL-2.0 +PKG_MAINTAINER:=Ted Hess <thess@kitschensync.net> +PKG_LICENSE:=GPL-2.0-or-later PKG_LICENSE_FILES:=COPYING PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-$(BUILD_VARIANT)/$(PKG_NAME)-$(PKG_VERSION) @@ -48,7 +48,7 @@ endef define Package/mpd-full $(call Package/mpd/Default) TITLE+= (full) - DEPENDS+= +libffmpeg +libid3tag +libmms +libupnp +libshout +pulseaudio-daemon + DEPENDS+= +libffmpeg +libid3tag +libmms +libupnp +libshout +pulseaudio-daemon +yajl PROVIDES:=mpd VARIANT:=full endef @@ -166,7 +166,8 @@ ifeq ($(BUILD_VARIANT),full) --enable-recorder-output \ --enable-shout \ --enable-pulse \ - --disable-vorbis + --disable-vorbis \ + --enable-soundcloud endif ifeq ($(BUILD_VARIANT),mini) @@ -179,6 +180,7 @@ ifeq ($(BUILD_VARIANT),mini) --disable-id3 \ --disable-mms \ --disable-shout \ + --disable-soundcloud \ --disable-pulse \ --enable-vorbis \ --with-tremor=yes \ diff --git a/sound/upmpdcli/Makefile b/sound/upmpdcli/Makefile index 259e8c9c3..3d1a614a0 100644 --- a/sound/upmpdcli/Makefile +++ b/sound/upmpdcli/Makefile @@ -8,19 +8,19 @@ include $(TOPDIR)/rules.mk PKG_NAME:=upmpdcli -PKG_VERSION:=1.4.0 +PKG_VERSION:=1.4.2 PKG_RELEASE:=1 +PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz +PKG_SOURCE_URL:=https://www.lesbonscomptes.com/upmpdcli/downloads +PKG_HASH:=1d4489e76416b9cff2e98a68243d56bf46303890752ac18db382be2859b62bc3 + PKG_MAINTAINER:=Petko Bordjukov <bordjukov@gmail.com> -PKG_LICENSE:=GPL-2.0 +PKG_LICENSE:=LGPL-2.1-or-later PKG_LICENSE_FILES:=COPYING -PKG_SOURCE_URL:=https://www.lesbonscomptes.com/upmpdcli/downloads -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz -PKG_HASH:=28742910fa16b72f0c4e5b7dc561f59aa7f1a5fdd3e8e4f72f359d2e4af90d35 - -PKG_FIXUP:=autoreconf PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 include $(INCLUDE_DIR)/package.mk diff --git a/sound/upmpdcli/files/upmpdcli.init b/sound/upmpdcli/files/upmpdcli.init index 6c69209c2..061628f94 100644 --- a/sound/upmpdcli/files/upmpdcli.init +++ b/sound/upmpdcli/files/upmpdcli.init @@ -16,7 +16,9 @@ append_arg() { local val config_get val "$cfg" "$var" - [ -n "$val" -o -n "$def" ] && procd_append_param command $opt "${val:-$def}" + if [ -n "$val" ] || [ -n "$def" ]; then + procd_append_param command "$opt" "${val:-$def}" + fi } start_instance() { diff --git a/sound/upmpdcli/patches/100-Use-uint64_t-instead-of-u_int64_t.patch b/sound/upmpdcli/patches/100-Use-uint64_t-instead-of-u_int64_t.patch deleted file mode 100644 index de0da18bb..000000000 --- a/sound/upmpdcli/patches/100-Use-uint64_t-instead-of-u_int64_t.patch +++ /dev/null @@ -1,68 +0,0 @@ -From 256394399f57ba6e3057ee2c981127a14e4623f8 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?J=C3=B6rg=20Krause?= <joerg.krause@embedded.rocks> -Date: Tue, 22 Jan 2019 09:07:56 +0100 -Subject: [PATCH] Use uint64_t instead of u_int64_t -MIME-Version: 1.0 -Content-Type: text/plain; charset=UTF-8 -Content-Transfer-Encoding: 8bit - -`uintN_t` is 
standard C99 type available in `<stdint.h>`, whereas `u_intN_t` -is defined `<sys/types.h>` - -As upmpdcli already uses the `uintN_t` type, replace the few existing -`u_intN_t` types, as it breaks build with the musl C library, which is -very strict, because of the missing `<sys/types.h>`: - -``` -src/mediaserver/cdplugins/netfetch.h:71:5: error: ‘u_int64_t’ does not name a type - u_int64_t datacount() { -``` ---- - src/mediaserver/cdplugins/netfetch.h | 12 ++++++------ - 1 file changed, 6 insertions(+), 6 deletions(-) - -diff --git a/src/mediaserver/cdplugins/netfetch.h b/src/mediaserver/cdplugins/netfetch.h -index d7e9df4..d105e53 100644 ---- a/src/mediaserver/cdplugins/netfetch.h -+++ b/src/mediaserver/cdplugins/netfetch.h -@@ -68,7 +68,7 @@ public: - /// Reset after transfer done, for retrying for exemple. - virtual bool reset() = 0; - -- u_int64_t datacount() { -+ uint64_t datacount() { - return fetch_data_count; - } - -@@ -84,11 +84,11 @@ public: - buf1cb = f; - } - // Called when the network transfer is done -- void setEOFetchCB(std::function<void(bool ok, u_int64_t count)> f) { -+ void setEOFetchCB(std::function<void(bool ok, uint64_t count)> f) { - eofcb = f; - } - // Called every time we get new data from the remote -- void setFetchBytesCB(std::function<void(u_int64_t count)> f) { -+ void setFetchBytesCB(std::function<void(uint64_t count)> f) { - fbcb = f; - } - -@@ -98,11 +98,11 @@ protected: - std::string _url; - uint64_t startoffset; - int timeoutsecs{0}; -- u_int64_t fetch_data_count{0}; -+ uint64_t fetch_data_count{0}; - BufXChange<ABuffer*> *outqueue{nullptr}; - std::function<bool(std::string&, void *, int)> buf1cb; -- std::function<void(u_int64_t)> fbcb; -- std::function<void(bool, u_int64_t)> eofcb; -+ std::function<void(uint64_t)> fbcb; -+ std::function<void(bool, uint64_t)> eofcb; - }; - - #endif /* _MEDIAFETCH_H_INCLUDED_ */ --- -2.11.0 - diff --git a/utils/attr/Makefile b/utils/attr/Makefile index 0a79cde72..eb107ce8f 100644 --- a/utils/attr/Makefile +++ b/utils/attr/Makefile @@ -9,18 +9,17 @@ include $(TOPDIR)/rules.mk PKG_NAME:=attr PKG_VERSION:=2.4.48 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=http://git.savannah.nongnu.org/cgit/attr.git/snapshot PKG_HASH:=095699f71230ace37e5bc680c6f9d15cf8e53eb38d00b2c46db5cc7e0712e5f3 -PKG_MAINTAINER:=Maxim Storchak <m.storchak@gmail.com> -PKG_LICENSE:=LGPL-2.1 GPL-2.0 -PKG_LICENSE_FILES:=doc/COPYING doc/COPYING.LGPL +PKG_MAINTAINER:=Maxim Storchak <m.storchak@gmail.com> -PKG_INSTALL:=1 PKG_FIXUP:=autoreconf +PKG_INSTALL:=1 +PKG_BUILD_PARALLEL:=1 include $(INCLUDE_DIR)/package.mk @@ -39,6 +38,8 @@ $(call Package/attr/Default) SECTION:=utils CATEGORY:=Utilities TITLE+=utils + LICENSE:=GPL-2.0-or-later + LICENSE_FILES:=doc/COPYING DEPENDS:=+libattr endef @@ -47,6 +48,8 @@ $(call Package/attr/Default) SECTION:=libs CATEGORY:=Libraries TITLE+=library + LICENSE:=LGPL-2.1-or-later + LICENSE_FILES:=doc/COPYING.LGPL endef define Package/libattr/description @@ -81,8 +84,8 @@ define Package/attr/conffiles endef define Build/InstallDev - mkdir -p $(1)/usr/include - mkdir -p $(1)/usr/lib/pkgconfig + $(INSTALL_DIR) $(1)/usr/include + $(INSTALL_DIR) $(1)/usr/lib/pkgconfig $(CP) $(PKG_INSTALL_DIR)/usr/{include,lib} $(1)/usr/ endef diff --git a/utils/attr/patches/110-Replace-bzero-with-memset.patch b/utils/attr/patches/110-Replace-bzero-with-memset.patch new file mode 100644 index 000000000..4607451ec --- /dev/null +++ b/utils/attr/patches/110-Replace-bzero-with-memset.patch @@ -0,0 +1,69 @@ 
+From 0ce120a140dadaa56875af2efc66ff805d37925b Mon Sep 17 00:00:00 2001 +From: Rosen Penev <rosenp@gmail.com> +Date: Sun, 11 Aug 2019 16:17:11 -0700 +Subject: [PATCH] attr: Replace bzero with memset + +bzero is a deprecated function that is optionally unavailable with +uClibc-ng. + +Signed-off-by: Rosen Penev <rosenp@gmail.com> +--- + include/attributes.h | 4 ++-- + libattr/libattr.c | 4 ++-- + tools/attr.c | 2 +- + 3 files changed, 5 insertions(+), 5 deletions(-) + +diff --git a/include/attributes.h b/include/attributes.h +index 14beb8f..039c817 100644 +--- a/include/attributes.h ++++ b/include/attributes.h +@@ -91,9 +91,9 @@ typedef struct attrlist_ent { /* data from attr_list() */ + * Implement a "cursor" for use in successive attr_list() calls. + * It provides a way to find the last attribute that was returned in the + * last attr_list() call so that we can get the next one without missing +- * any. This should be bzero()ed before use and whenever it is desired to ++ * any. This should be zeroed before use and whenever it is desired to + * start over from the beginning of the attribute list. The only valid +- * operation on a cursor is to bzero() it. ++ * operation on a cursor is to zero it. + */ + typedef struct attrlist_cursor { + uint32_t opaque[4]; /* an opaque cookie */ +diff --git a/libattr/libattr.c b/libattr/libattr.c +index d550e10..2ebd1c5 100644 +--- a/libattr/libattr.c ++++ b/libattr/libattr.c +@@ -298,7 +298,7 @@ attr_list(const char *path, char *buffer, const int buffersize, int flags, + errno = EINVAL; + return -1; + } +- bzero(buffer, sizeof(attrlist_t)); ++ memset(buffer, 0, sizeof(attrlist_t)); + + if (flags & ATTR_DONTFOLLOW) + length = llistxattr(path, lbuf, sizeof(lbuf)); +@@ -348,7 +348,7 @@ attr_listf(int fd, char *buffer, const int buffersize, int flags, + errno = EINVAL; + return -1; + } +- bzero(buffer, sizeof(attrlist_t)); ++ memset(buffer, 0, sizeof(attrlist_t)); + + length = flistxattr(fd, lbuf, sizeof(lbuf)); + if (length < 0) +diff --git a/tools/attr.c b/tools/attr.c +index c8aa0b4..312aef1 100644 +--- a/tools/attr.c ++++ b/tools/attr.c +@@ -228,7 +228,7 @@ main(int argc, char **argv) + perror("malloc"); + exit(1); + } +- bzero((char *)&cursor, sizeof(cursor)); ++ memset(&cursor, 0, sizeof(cursor)); + do { + error = attr_list(filename, buffer, BUFSIZE, + attrflags, &cursor); +-- +2.17.1 + diff --git a/utils/btrfs-progs/Makefile b/utils/btrfs-progs/Makefile index 0e72a87ef..3ad77c981 100644 --- a/utils/btrfs-progs/Makefile +++ b/utils/btrfs-progs/Makefile @@ -6,16 +6,16 @@ include $(TOPDIR)/rules.mk PKG_NAME:=btrfs-progs -PKG_VERSION:=5.1.1 +PKG_VERSION:=5.2.1 PKG_RELEASE:=1 PKG_SOURCE:=$(PKG_NAME)-v$(PKG_VERSION).tar.xz PKG_SOURCE_URL:=@KERNEL/linux/kernel/people/kdave/btrfs-progs -PKG_HASH:=9cb91b7de9e10aa6bbf2b003f60bb3f5e5b1984a8008fad7c4b2d3978f5ebe1b +PKG_HASH:=36ac4a0198ffff79d5800c537ea4b19769a8fd3ad870f75413d25b20e2d83233 PKG_BUILD_DIR:=$(BUILD_DIR)/$(PKG_NAME)-v$(PKG_VERSION) PKG_MAINTAINER:=Karel Kočí <karel.koci@nic.cz> -PKG_LICENSE:=GPL-2.0 +PKG_LICENSE:=GPL-2.0-only PKG_LICENSE_FILES:=COPYING PKG_INSTALL:=1 @@ -44,8 +44,8 @@ define Package/btrfs-progs/config source "$(SOURCE)/Config.in" endef -progs = btrfs btrfs-find-root btrfs-image btrfs-map-logical \ - btrfs-select-super btrfstune mkfs.btrfs +boxprogs = btrfsck mkfs.btrfs btrfs-image btrfstune btrfs-find-root +progs = btrfs-map-logical btrfs-select-super TARGET_CFLAGS += -ffunction-sections -fdata-sections TARGET_LDFLAGS += -Wl,--gc-sections -Wl,--as-needed @@ -61,6 +61,10 @@ ifneq 
($(CONFIG_BTRFS_PROGS_ZSTD),y) CONFIGURE_ARGS += --disable-zstd endif +MAKE_INSTALL_FLAGS += BUILD_PROGRAMS=0 + +Build/Compile=$(call Build/Compile/Default,btrfs.box $(progs)) + define Build/InstallDev $(INSTALL_DIR) $(1)/usr/include $(1)/usr/lib $(CP) $(PKG_INSTALL_DIR)/usr/include/* $(1)/usr/include/ @@ -72,10 +76,11 @@ define Package/btrfs-progs/install $(CP) $(PKG_INSTALL_DIR)/usr/lib/libbtrfs.so* $(1)/usr/lib $(CP) $(PKG_INSTALL_DIR)/usr/lib/libbtrfsutil.so* $(1)/usr/lib $(INSTALL_DIR) $(1)/usr/bin - $(INSTALL_BIN) $(addprefix $(PKG_INSTALL_DIR)/usr/bin/, $(progs)) $(1)/usr/bin/ - $(LN) btrfs $(1)/usr/bin/btrfsck - $(INSTALL_DIR) $(1)/etc/init.d - $(INSTALL_BIN) ./files/btrfs-scan.init $(1)/etc/init.d/btrfs-scan + $(INSTALL_BIN) $(PKG_BUILD_DIR)/btrfs.box $(1)/usr/bin/btrfs + $(foreach prog,$(boxprogs),$(LN) btrfs $(1)/usr/bin/$(prog);) + $(foreach prog,$(progs),$(INSTALL_BIN) $(PKG_BUILD_DIR)/$(prog) $(1)/usr/bin/;) + $(INSTALL_DIR) $(1)/lib/preinit + $(INSTALL_BIN) ./files/btrfs-scan.init $(1)/lib/preinit/85_btrfs_scan endef $(eval $(call BuildPackage,btrfs-progs)) diff --git a/utils/btrfs-progs/files/btrfs-scan.init b/utils/btrfs-progs/files/btrfs-scan.init index 762e0b840..608d3d6c5 100644 --- a/utils/btrfs-progs/files/btrfs-scan.init +++ b/utils/btrfs-progs/files/btrfs-scan.init @@ -1,9 +1,7 @@ -#!/bin/sh /etc/rc.common -# Copyright (C) 2014 OpenWrt.org +#!/bin/sh -START=19 - -start() { - grep -q btrfs /proc/filesystems && /usr/bin/btrfs device scan +preinit_btrfs_scan() { + grep -vq btrfs /proc/filesystems || btrfs device scan } +boot_hook_add preinit_main preinit_btrfs_scan diff --git a/utils/collectd/Makefile b/utils/collectd/Makefile index 94511271e..21a2285dd 100644 --- a/utils/collectd/Makefile +++ b/utils/collectd/Makefile @@ -9,7 +9,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=collectd PKG_VERSION:=5.9.0 -PKG_RELEASE:=3 +PKG_RELEASE:=4 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.bz2 PKG_SOURCE_URL:=https://collectd.org/files/ \ @@ -176,6 +176,7 @@ COLLECTD_PLUGINS_SELECTED:= \ teamspeak2 \ ted \ thermal \ + threshold \ unixsock \ uptime \ users \ @@ -418,6 +419,7 @@ $(eval $(call BuildPlugin,teamspeak2,TeamSpeak2 input,teamspeak2,)) $(eval $(call BuildPlugin,ted,The Energy Detective input,ted,)) $(eval $(call BuildPlugin,tcpconns,TCP connection tracking input,tcpconns,)) $(eval $(call BuildPlugin,thermal,system temperatures input,thermal,)) +$(eval $(call BuildPlugin,threshold,Notifications and thresholds,threshold,)) $(eval $(call BuildPlugin,unixsock,unix socket output,unixsock,)) $(eval $(call BuildPlugin,uptime,uptime status input,uptime,)) $(eval $(call BuildPlugin,users,user logged in status input,users,)) diff --git a/utils/collectd/files/collectd.init b/utils/collectd/files/collectd.init index 7b9c4623e..a9182380f 100644 --- a/utils/collectd/files/collectd.init +++ b/utils/collectd/files/collectd.init @@ -189,7 +189,7 @@ process_config() { config_get ReadThreads globals ReadThreads 2 printf "ReadThreads \"%s\"\n" "$ReadThreads" >> "$COLLECTD_CONF" - config_get Hostname globals Hostname "$(hostname)" + config_get Hostname globals Hostname "$(uname -n)" printf "Hostname \"%s\"\n" "$Hostname" >> "$COLLECTD_CONF" printf "\n" >> "$COLLECTD_CONF" @@ -199,16 +199,14 @@ process_config() { } start_service() { + process_config + procd_open_instance procd_set_param command /usr/sbin/collectd procd_append_param command -C "$COLLECTD_CONF" - procd_append_param command -f # don't daemonize, procd will handle that for us + procd_append_param command -f # don't daemonize 
procd_set_param nice "$NICEPRIO" - - process_config - - # set auto respawn behavior + procd_set_param stderr 1 procd_set_param respawn procd_close_instance } - diff --git a/utils/collectd/patches/920-fix-ping-droprate.patch b/utils/collectd/patches/920-fix-ping-droprate.patch deleted file mode 100644 index 6b1e9a0d4..000000000 --- a/utils/collectd/patches/920-fix-ping-droprate.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/src/ping.c -+++ b/src/ping.c -@@ -648,7 +648,7 @@ static int ping_read(void) /* {{{ */ - ((double)(pkg_recv * (pkg_recv - 1)))); - - /* Calculate drop rate. */ -- droprate = ((double)(pkg_sent - pkg_recv)) / ((double)pkg_sent); -+ droprate = ((double)(pkg_sent - pkg_recv)) * 100 / ((double)pkg_sent); - - submit(hl->host, "ping", latency_average); - submit(hl->host, "ping_stddev", latency_stddev); diff --git a/utils/docker-ce/Makefile b/utils/docker-ce/Makefile index 6518dee81..ced993ad3 100644 --- a/utils/docker-ce/Makefile +++ b/utils/docker-ce/Makefile @@ -1,15 +1,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=docker-ce -PKG_VERSION:=18.09.8 +PKG_VERSION:=19.03.1 PKG_RELEASE:=1 PKG_LICENSE:=Apache-2.0 PKG_LICENSE_FILES:=components/cli/LICENSE components/engine/LICENSE PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=https://codeload.github.com/docker/docker-ce/tar.gz/v$(PKG_VERSION)? -PKG_HASH:=33dfaf3cf296f8e9011ec6ed2de0125dfeaf8a938126f0218b0218a156c14014 -PKG_SOURCE_VERSION:=0dd43dd87f +PKG_HASH:=dad9123e203751ce9981bc34773721593655231c32412e310e748b18f10f0053 +PKG_SOURCE_VERSION:=74b1e89e8a PKG_MAINTAINER:=Gerard Ryan <G.M0N3Y.2503@gmail.com> @@ -23,7 +23,7 @@ endef # values from respective '.installer' files at https://github.com/docker/docker-ce/blob/v$(PKG_VERSION)/components/engine/hack/dockerfile/install/ $(eval $(call CheckExpectedSrcVer,../containerd/Makefile,894b81a4b802e4eb2a91d1ce216b8817763c29fb)) -$(eval $(call CheckExpectedSrcVer,../libnetwork/Makefile,e7933d41e7b206756115aa9df5e0599fc5169742)) +$(eval $(call CheckExpectedSrcVer,../libnetwork/Makefile,fc5a7d91d54cc98f64fc28f9e288b46a0bee756c)) $(eval $(call CheckExpectedSrcVer,../runc/Makefile,425e105d5a03fabd737a126ad93d62a9eeede87f)) $(eval $(call CheckExpectedSrcVer,../tini/Makefile,fec3683b971d9c3ef73f284f176672c44b448662)) @@ -116,6 +116,9 @@ define Package/docker-ce/install $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) ./files/dockerd.init $(1)/etc/init.d/dockerd + + $(INSTALL_DIR) $(1)/etc/docker + $(INSTALL_CONF) ./files/daemon.json $(1)/etc/docker/ endef $(eval $(call BuildPackage,docker-ce)) diff --git a/utils/docker-ce/files/daemon.json b/utils/docker-ce/files/daemon.json new file mode 100644 index 000000000..4084c23a0 --- /dev/null +++ b/utils/docker-ce/files/daemon.json @@ -0,0 +1,3 @@ +{ + "data-root": "/opt/docker/" +} diff --git a/utils/docker-ce/files/dockerd.init b/utils/docker-ce/files/dockerd.init index bf1bd8ab6..d53c17178 100644 --- a/utils/docker-ce/files/dockerd.init +++ b/utils/docker-ce/files/dockerd.init @@ -3,9 +3,8 @@ USE_PROCD=1 START=25 -# docker can't run with a symlink in the path so we just use /opt/docker/ start_service() { procd_open_instance - procd_set_param command /usr/bin/dockerd -g /opt/docker/ + procd_set_param command /usr/bin/dockerd procd_close_instance } diff --git a/utils/dump1090/Makefile b/utils/dump1090/Makefile index 2758dc4d2..b52ae294b 100644 --- a/utils/dump1090/Makefile +++ b/utils/dump1090/Makefile @@ -8,21 +8,20 @@ include $(TOPDIR)/rules.mk PKG_NAME:=dump1090 -PKG_VERSION:=2017-06-01 +PKG_VERSION:=3.7.1 PKG_RELEASE:=1 
PKG_SOURCE_PROTO:=git -PKG_SOURCE_URL:=https://github.com/mutability/dump1090.git -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) -PKG_SOURCE_VERSION:=b155fdb458c3241ab375d1f2b12fbb6a9f8a8a3a -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.xz -PKG_MIRROR_HASH:=bea40197bce22c837273848bc75d273c2688b131c11895685a35ee6c6242843a -PKG_MAINTAINER:=Álvaro Fernández Rojas <noltari@gmail.com> +PKG_SOURCE_URL:=https://github.com/flightaware/dump1090 +PKG_SOURCE_VERSION:=v$(PKG_VERSION) +PKG_MIRROR_HASH:=d7ed250d624eae2eec6c0a2dd410986f42230bf929dab67893ea3bf1cab8a203 -PKG_LICENSE:=GPL-2.0 +PKG_MAINTAINER:=Álvaro Fernández Rojas <noltari@gmail.com> +PKG_LICENSE:=GPL-2.0-or-later PKG_LICENSE_FILES:=COPYING LICENSE PKG_BUILD_DEPENDS:=libusb +PKG_BUILD_PARALLEL:=1 include $(INCLUDE_DIR)/package.mk @@ -31,18 +30,18 @@ define Package/dump1090/Default CATEGORY:=Utilities TITLE:=Mode S decoder for the Realtek RTL2832U URL:=https://github.com/mutability/dump1090 - DEPENDS:=+libpthread endef define Package/dump1090 $(call Package/dump1090/Default) TITLE+= (dump1090) - DEPENDS+= +librtlsdr +uhttpd + DEPENDS+=+librtlsdr +uhttpd +libncurses endef define Package/view1090 $(call Package/dump1090/Default) TITLE+= (view1090) + DEPENDS+=+libncurses endef define Package/dump1090/description @@ -54,9 +53,13 @@ define Package/view1090/description endef MAKE_FLAGS += \ + BLADERF=no \ CFLAGS="$(TARGET_CFLAGS)" \ UNAME="Linux" +TARGET_CFLAGS += -ffunction-sections -fdata-sections -flto +TARGET_LDFLAGS += -Wl,--gc-sections,--as-needed + define Package/dump1090/install $(INSTALL_DIR) $(1)/etc/init.d $(INSTALL_BIN) files/dump1090.init $(1)/etc/init.d/dump1090 diff --git a/utils/dump1090/files/dump1090.init b/utils/dump1090/files/dump1090.init index d57a2e198..89c333322 100644 --- a/utils/dump1090/files/dump1090.init +++ b/utils/dump1090/files/dump1090.init @@ -13,7 +13,9 @@ append_arg() { local val config_get val "$cfg" "$var" - [ -n "$val" -o -n "$def" ] && procd_append_param command $opt "${val:-$def}" + if [ -n "$val" ] || [ -n "$def" ]; then + procd_append_param command "$opt" "${val:-$def}" + fi } append_bool() { @@ -82,7 +84,7 @@ start_instance() { append_arg "$cfg" html_dir "--html-dir" append_arg "$cfg" write_json "--write-json" config_get aux "$cfg" "write_json" - [ -n "$aux" ] && mkdir -p $aux + [ -n "$aux" ] && mkdir -p "$aux" append_arg "$cfg" write_json_every "--write-json-every" append_arg "$cfg" json_location_accuracy "--json-location-accuracy" append_bool "$cfg" oversample "--oversample" diff --git a/utils/ecdsautils/Makefile b/utils/ecdsautils/Makefile index 7f1c76f03..b4fe3208a 100644 --- a/utils/ecdsautils/Makefile +++ b/utils/ecdsautils/Makefile @@ -10,14 +10,17 @@ include $(TOPDIR)/rules.mk PKG_NAME:=ecdsautils PKG_VERSION:=0.3.2.20160630 PKG_RELEASE:=1 -PKG_REV:=07538893fb6c2a9539678c45f9dbbf1e4f222b46 -PKG_MAINTAINER:=Matthias Schiffer <mschiffer@universe-factory.net> -PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.xz -PKG_MIRROR_HASH:=397395a471c0b5af1a173666ba21a5bedb4c3423a6e37c545c3627bed73dcb76 -PKG_SOURCE_URL:=git://github.com/tcatm/$(PKG_NAME).git -PKG_SOURCE_VERSION:=$(PKG_REV) -PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION) + PKG_SOURCE_PROTO:=git +PKG_SOURCE_URL:=https://github.com/freifunk-gluon/ecdsautils +PKG_SOURCE_VERSION:=07538893fb6c2a9539678c45f9dbbf1e4f222b46 +PKG_MIRROR_HASH:=397395a471c0b5af1a173666ba21a5bedb4c3423a6e37c545c3627bed73dcb76 + +PKG_MAINTAINER:=Matthias Schiffer <mschiffer@universe-factory.net> +PKG_LICENSE_FILES:=COPYRIGHT + +PKG_BUILD_PARALLEL:=1 
+CMAKE_INSTALL:=1 include $(INCLUDE_DIR)/package.mk include $(INCLUDE_DIR)/cmake.mk @@ -28,6 +31,7 @@ define Package/libecdsautil DEPENDS:=+libuecc TITLE:=ECDSA library URL:=https://github.com/tcatm/ecdsautils + LICENSE:=MIT endef define Package/ecdsautils @@ -36,12 +40,9 @@ define Package/ecdsautils DEPENDS:=+libecdsautil +libuecc TITLE:=ECDSA Utilities URL:=https://github.com/tcatm/ecdsautils + LICENSE:=BSD-2-Clause endef -CMAKE_OPTIONS += \ - -DCMAKE_BUILD_TYPE:String="MINSIZEREL" \ - - define Package/libecdsautil/description Library to sign and verify checksums using ECDSA. endef @@ -50,6 +51,9 @@ define Package/ecdsautils/description Utilities to sign and verify checksums using ECDSA. endef +CMAKE_OPTIONS += \ + -DCMAKE_BUILD_TYPE:String="MINSIZEREL" + define Package/libecdsautil/install $(INSTALL_DIR) $(1)/usr/lib/ $(CP) $(PKG_INSTALL_DIR)/usr/lib/libecdsautil.so* $(1)/usr/lib/ @@ -63,11 +67,5 @@ define Package/ecdsautils/install $(CP) $(PKG_INSTALL_DIR)/usr/bin/ecdsaverify $(1)/usr/bin/ endef -define Build/InstallDev - $(INSTALL_DIR) $(1)/usr/ - $(CP) $(PKG_INSTALL_DIR)/usr/include $(1)/usr/ - $(CP) $(PKG_INSTALL_DIR)/usr/lib $(1)/usr/ -endef - $(eval $(call BuildPackage,libecdsautil)) $(eval $(call BuildPackage,ecdsautils)) diff --git a/utils/findutils/Makefile b/utils/findutils/Makefile index 2463fc6ee..74861029d 100644 --- a/utils/findutils/Makefile +++ b/utils/findutils/Makefile @@ -1,4 +1,4 @@ -# +# # Copyright (C) 2006-2016 OpenWrt.org # # This is free software, licensed under the GNU General Public License v2. @@ -8,14 +8,15 @@ include $(TOPDIR)/rules.mk PKG_NAME:=findutils PKG_VERSION:=4.6.0 -PKG_RELEASE:=3 - -PKG_LICENSE:=GPL-3.0+ +PKG_RELEASE:=4 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz PKG_SOURCE_URL:=@GNU/$(PKG_NAME) PKG_HASH:=ded4c9f73731cd48fec3b6bdaccce896473b6d8e337e9612e16cf1431bb1169d + PKG_MAINTAINER:=Daniel Dickinson <cshored@thecshore.com> +PKG_LICENSE:=GPL-3.0-or-later +PKG_LICENSE_FILES:=COPYING PKG_BUILD_PARALLEL:=1 PKG_INSTALL:=1 diff --git a/utils/findutils/patches/010-sysmacros.patch b/utils/findutils/patches/010-sysmacros.patch new file mode 100644 index 000000000..dc3678d0a --- /dev/null +++ b/utils/findutils/patches/010-sysmacros.patch @@ -0,0 +1,11 @@ +--- a/gl/lib/mountlist.c ++++ b/gl/lib/mountlist.c +@@ -33,6 +33,8 @@ + + #include <unistd.h> + ++#include <sys/sysmacros.h> ++ + #if HAVE_SYS_PARAM_H + # include <sys/param.h> + #endif diff --git a/utils/gddrescue/Makefile b/utils/gddrescue/Makefile index 527fca23b..192c26500 100644 --- a/utils/gddrescue/Makefile +++ b/utils/gddrescue/Makefile @@ -6,7 +6,7 @@ include $(TOPDIR)/rules.mk PKG_NAME:=gddrescue PKG_VERSION:=1.23 -PKG_RELEASE:=1 +PKG_RELEASE:=2 PKG_SOURCE:=$(PKG_NAME)_$(PKG_VERSION).orig.tar.xz PKG_SOURCE_URL:=http://http.debian.net/debian/pool/main/g/$(PKG_NAME) @@ -27,7 +27,7 @@ define Package/gddrescue CATEGORY:=Utilities TITLE:=Data recovery tool URL:=https://www.gnu.org/software/ddrescue/ - DEPENDS:=$(CXX_DEPENDS) @!USE_UCLIBC + DEPENDS:=$(CXX_DEPENDS) endef define Package/gddrescue/description diff --git a/utils/gddrescue/patches/010-fix-uclibcxx.patch b/utils/gddrescue/patches/010-fix-uclibcxx.patch new file mode 100644 index 000000000..66e8e7671 --- /dev/null +++ b/utils/gddrescue/patches/010-fix-uclibcxx.patch @@ -0,0 +1,102 @@ +--- a/fillbook.cc ++++ b/fillbook.cc +@@ -31,6 +31,9 @@ + #include "block.h" + #include "mapbook.h" + ++#ifdef __UCLIBCXX_MAJOR__ ++#undef fputc ++#endif + + // Return values: 1 write error, 0 OK. 
+ //
+--- a/genbook.cc
++++ b/genbook.cc
+@@ -31,6 +31,9 @@
+ #include "block.h"
+ #include "mapbook.h"
+
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
+
+ const char * format_time( const long t, const bool low_prec )
+ {
+--- a/loggers.cc
++++ b/loggers.cc
+@@ -25,6 +25,9 @@
+ #include "block.h"
+ #include "loggers.h"
+
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
+
+ namespace {
+
+--- a/main.cc
++++ b/main.cc
+@@ -46,6 +46,11 @@
+ #include "non_posix.h"
+ #include "rescuebook.h"
+
++#ifdef __UCLIBCXX_MAJOR__
++#undef fgetc
++#undef fputc
++#endif
++
+ #ifndef O_BINARY
+ #define O_BINARY 0
+ #endif
+--- a/main_common.cc
++++ b/main_common.cc
+@@ -15,6 +15,10 @@
+ along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
++
+ int verbosity = 0;
+
+ namespace {
+
+--- a/mapbook.cc
++++ b/mapbook.cc
+@@ -32,6 +32,10 @@
+ #include "block.h"
+ #include "mapbook.h"
+
++#ifdef __UCLIBCXX_MAJOR__
++#undef fgetc
++#undef fputc
++#endif
+
+ namespace {
+
+--- a/mapfile.cc
++++ b/mapfile.cc
+@@ -29,6 +29,11 @@
+
+ #include "block.h"
+
++#ifdef __UCLIBCXX_MAJOR__
++#undef fgetc
++#undef ferror
++#undef feof
++#endif
+
+ namespace {
+
+--- a/rescuebook.cc
++++ b/rescuebook.cc
+@@ -36,6 +36,9 @@
+ #include "mapbook.h"
+ #include "rescuebook.h"
+
++#ifdef __UCLIBCXX_MAJOR__
++#undef fputc
++#endif
+
+ namespace {
+
diff --git a/utils/gpsd/Makefile b/utils/gpsd/Makefile
index dd9d9a9d5..55c975d0a 100644
--- a/utils/gpsd/Makefile
+++ b/utils/gpsd/Makefile
@@ -8,12 +8,12 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=gpsd
-PKG_VERSION:=3.17
-PKG_RELEASE:=3
+PKG_VERSION:=3.19
+PKG_RELEASE:=1
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=@SAVANNAH/$(PKG_NAME)
-PKG_HASH:=68e0dbecfb5831997f8b3d6ba48aed812eb465d8c0089420ab68f9ce4d85e77a
+PKG_HASH:=27dd24d45b2ac69baab7933da2bf6ae5fb0be90130f67e753c110a3477155f39
 PKG_MAINTAINER:=Pushpal Sidhu <psidhu.devel@gmail.com>
 PKG_LICENSE:=BSD-3-Clause
@@ -24,7 +24,7 @@ include $(INCLUDE_DIR)/scons.mk
 define Package/gpsd/Default
 DEPENDS+= +libusb-1.0
- URL:=http://catb.org/gpsd/
+ URL:=https://gpsd.gitlab.io/gpsd/
 endef
 define Package/gpsd/Default/description
diff --git a/utils/gpsd/patches/010-musl.patch b/utils/gpsd/patches/010-musl.patch
deleted file mode 100644
index a2ba95112..000000000
--- a/utils/gpsd/patches/010-musl.patch
+++ /dev/null
@@ -1,40 +0,0 @@
---- a/driver_ais.c
-+++ b/driver_ais.c
-@@ -24,6 +24,7 @@
-
- /* strlcpy() needs _DARWIN_C_SOURCE */
- #define _DARWIN_C_SOURCE
-+#define _BSD_SOURCE
-
- #include <stdlib.h>
- #include <string.h>
---- a/gpsctl.c
-+++ b/gpsctl.c
-@@ -15,6 +15,7 @@
-
- /* strlcpy() needs _DARWIN_C_SOURCE */
- #define _DARWIN_C_SOURCE
-+#define _BSD_SOURCE
-
- #include <stdio.h>
- #include <stdlib.h>
---- a/gpsd_json.c
-+++ b/gpsd_json.c
-@@ -24,6 +24,7 @@ PERMISSIONS
- #define __DARWIN_C_LEVEL 200112L
- /* strlcpy() needs _DARWIN_C_SOURCE */
- #define _DARWIN_C_SOURCE
-+#define _BSD_SOURCE
-
- #include <stdio.h>
- #include <math.h>
---- a/libgpsd_core.c
-+++ b/libgpsd_core.c
-@@ -20,6 +20,7 @@
-
- /* strlcpy() needs _DARWIN_C_SOURCE */
- #define _DARWIN_C_SOURCE
-+#define _BSD_SOURCE
-
- #include <time.h>
- #include <stdio.h>
diff --git a/utils/lcdproc/Makefile b/utils/lcdproc/Makefile
index 0260cf4a4..00fd460ba 100644
--- a/utils/lcdproc/Makefile
+++ b/utils/lcdproc/Makefile
@@ -9,18 +9,17 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=lcdproc
 PKG_VERSION:=0.5.9
-PKG_RELEASE:=2
+PKG_RELEASE:=3
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://github.com/lcdproc/lcdproc/releases/download/v$(PKG_VERSION)/
 PKG_HASH:=d48a915496c96ff775b377d2222de3150ae5172bfb84a6ec9f9ceab962f97b83
+
 PKG_MAINTAINER:=Harald Geyer <harald@ccbib.org>, \
 Philip Prindeville <philipp@redfish-solutions.com>
-PKG_LICENSE:=GPL-2.0
+PKG_LICENSE:=GPL-2.0-only
 PKG_LICENSE_FILES:=COPYING
-PKG_FIXUP:=autoreconf
-
 include $(INCLUDE_DIR)/package.mk
 define Package/lcdproc/Default
@@ -109,10 +108,6 @@ This package contains display drivers with external dependencies:
 $(LCDPROC_OTHER_DRIVERS_TEXT)
 endef
-
-# not everything groks --disable-nls
-DISABLE_NLS:=
-
 CONFIGURE_ARGS += \
 --disable-libX11 \
 --disable-libhid \
@@ -120,9 +115,6 @@ CONFIGURE_ARGS += \
 --disable-freetype \
 --enable-drivers='all,!g15,!g15driver,!glcdlib,!irman,!lirc,!mdm166a,!mx5000,!svga,!xosd'
-# can't use -Wformat=2 because MUSL is somewhat broken
-TARGET_CFLAGS+=-Wall
-
 MAKE_FLAGS += \
 CFLAGS="$(TARGET_CFLAGS)" \
 LDFLAGS="$(TARGET_LDFLAGS)"
diff --git a/utils/lcdproc/patches/110-in-outb.patch b/utils/lcdproc/patches/110-in-outb.patch
new file mode 100644
index 000000000..7bf34eb30
--- /dev/null
+++ b/utils/lcdproc/patches/110-in-outb.patch
@@ -0,0 +1,11 @@
+--- a/server/drivers/port.h
++++ b/server/drivers/port.h
+@@ -94,7 +94,7 @@ static inline int port_deny_multiple(unsigned short port, unsigned short count);
+ /* ---------------------------- Linux ------------------------------------ */
+ /* Use ioperm, inb and outb in <sys/io.h> (Linux) */
+ /* And iopl for higher addresses of PCI LPT cards */
+-#if defined HAVE_IOPERM
++#if defined(__GLIBC__) || (defined(__x86__) || defined(__x86_64__))
+
+ /* Glibc2 and Glibc1 */
+ # ifdef HAVE_SYS_IO_H
diff --git a/utils/libnetwork/Makefile b/utils/libnetwork/Makefile
index ccd3b76db..28bfbb244 100644
--- a/utils/libnetwork/Makefile
+++ b/utils/libnetwork/Makefile
@@ -1,8 +1,7 @@
 include $(TOPDIR)/rules.mk
 PKG_NAME:=libnetwork
-PKG_VERSION:=0.8.0-dev.2
-PKG_RELEASE:=1
+PKG_RELEASE:=2
 PKG_LICENSE:=Apache-2.0
 PKG_LICENSE_FILES:=LICENSE
@@ -13,9 +12,9 @@ GO_PKG_BUILD_PKG:= \
 PKG_SOURCE_PROTO:=git
 PKG_SOURCE_URL:=https://$(GO_PKG)
-PKG_SOURCE_VERSION:=e7933d41e7b206756115aa9df5e0599fc5169742
-PKG_MIRROR_HASH:=48638648bfd2b249f8e9cc32b5ec295a64e61fcb7cf635ca1a88809662167374
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.xz
+PKG_SOURCE_VERSION:=fc5a7d91d54cc98f64fc28f9e288b46a0bee756c
+PKG_SOURCE_DATE:=2019-08-03
+PKG_MIRROR_HASH:=a54fe7456ce448efc33d274ac8f2115d25196d7d58c44e5227bb0846b29b7fcd
 PKG_MAINTAINER:=Gerard Ryan <G.M0N3Y.2503@gmail.com>
@@ -30,7 +29,7 @@ define Package/libnetwork
 CATEGORY:=Utilities
 TITLE:=networking for containers
 URL:=https://github.com/docker/libnetwork
- DEPENDS:=$(GO_ARCH_DEPENDS)
+ DEPENDS:=$(GO_ARCH_DEPENDS) @TARGET_x86_64
 endef
 define Package/libnetwork/description
diff --git a/utils/mariadb/Makefile b/utils/mariadb/Makefile
index badba0725..df26bc568 100644
--- a/utils/mariadb/Makefile
+++ b/utils/mariadb/Makefile
@@ -8,8 +8,8 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=mariadb
-PKG_VERSION:=10.2.24
-PKG_RELEASE:=2
+PKG_VERSION:=10.2.26
+PKG_RELEASE:=1
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL := \
@@ -18,10 +18,10 @@ PKG_SOURCE_URL := \
 https://ftp.yz.yamagata-u.ac.jp/pub/dbms/mariadb/$(PKG_NAME)-$(PKG_VERSION)/source \
 https://downloads.mariadb.org/interstitial/$(PKG_NAME)-$(PKG_VERSION)/source
-PKG_HASH:=97f4d924e69f77abb2f650116785c2f5ef356230442534ebcbaadb51d9bb8bc4
+PKG_HASH:=152fe941c4f2a352b2b3a4db1ef64e70235fd9ff055af62ad7bda9f2b2191528
 PKG_MAINTAINER:=Sebastian Kemper <sebastian_ml@gmx.net>
 PKG_LICENSE:=GPL-2.0 LGPL-2.1
-PKG_LICENSE_FILES:=COPYING libmariadb/COPYING.LIB
+PKG_LICENSE_FILES:=COPYING THIRDPARTY libmariadb/COPYING.LIB
 PKG_CPE_ID:=cpe:/a:mariadb:mariadb
@@ -65,7 +65,6 @@ MARIADB_SERVER_PLUGINS := \
 auth_ed25519 \
 auth_gssapi \
 auth_pam \
- client_ed25519 \
 disks \
 feedback \
 file_key_management \
@@ -98,7 +97,6 @@ plugin-auth_gssapi_client := PLUGIN_AUTH_GSSAPI_CLIENT
 plugin-auth_ed25519 := PLUGIN_AUTH_ED25519
 plugin-auth_gssapi := PLUGIN_AUTH_GSSAPI
 plugin-auth_pam := PLUGIN_AUTH_PAM
-plugin-client_ed25519 := PLUGIN_CLIENT_ED25519
 plugin-disks := PLUGIN_DISKS
 plugin-feedback := PLUGIN_FEEDBACK
 plugin-file_key_management := PLUGIN_FILE_KEY_MANAGEMENT
@@ -523,6 +521,7 @@ define Package/libmariadb/install
 $(INSTALL_DIR) $(1)$(PLUGIN_DIR)
 $(CP) $(PKG_INSTALL_DIR)/usr/lib/lib{mariadb,mysqlclient}*.so* $(1)/usr/lib
 $(INSTALL_BIN) $(PKG_INSTALL_DIR)$(PLUGIN_DIR)/caching_sha2_password.so $(1)$(PLUGIN_DIR)
+ $(INSTALL_BIN) $(PKG_INSTALL_DIR)$(PLUGIN_DIR)/client_ed25519.so $(1)$(PLUGIN_DIR)
 $(INSTALL_BIN) $(PKG_INSTALL_DIR)$(PLUGIN_DIR)/dialog.so $(1)$(PLUGIN_DIR)
 $(INSTALL_BIN) $(PKG_INSTALL_DIR)$(PLUGIN_DIR)/mysql_clear_password.so $(1)$(PLUGIN_DIR)
 $(INSTALL_BIN) $(PKG_INSTALL_DIR)$(PLUGIN_DIR)/sha256_password.so $(1)$(PLUGIN_DIR)
@@ -617,7 +616,6 @@ $(eval $(call BuildPlugin,libmariadb,auth_gssapi_client,+krb5-libs))
 $(eval $(call BuildPlugin,mariadb-server,auth_ed25519,))
 $(eval $(call BuildPlugin,mariadb-server,auth_gssapi,+krb5-libs))
 $(eval $(call BuildPlugin,mariadb-server,auth_pam,+libpam))
-$(eval $(call BuildPlugin,mariadb-server,client_ed25519,))
 $(eval $(call BuildPlugin,mariadb-server,disks,))
 $(eval $(call BuildPlugin,mariadb-server,feedback,))
 $(eval $(call BuildPlugin,mariadb-server,file_key_management,))
diff --git a/utils/mariadb/patches/100-fix_hostname.patch b/utils/mariadb/patches/100-fix_hostname.patch
index 81cef724c..ae1ce0e22 100644
--- a/utils/mariadb/patches/100-fix_hostname.patch
+++ b/utils/mariadb/patches/100-fix_hostname.patch
@@ -1,6 +1,6 @@
 --- a/scripts/mysql_install_db.sh
 +++ b/scripts/mysql_install_db.sh
-@@ -403,7 +403,7 @@ fi
+@@ -410,7 +410,7 @@ fi
 # Try to determine the hostname
diff --git a/utils/mariadb/patches/130-c11_atomics.patch b/utils/mariadb/patches/130-c11_atomics.patch
index e7dad179e..00a636305 100644
--- a/utils/mariadb/patches/130-c11_atomics.patch
+++ b/utils/mariadb/patches/130-c11_atomics.patch
@@ -46,7 +46,7 @@ Author: Vicențiu Ciorbaru <vicentiu@mariadb.org>
 +++ b/include/atomic/gcc_builtins.h
 @@ -16,6 +16,7 @@ along with this program; if not, write to the Free Software
- Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA */
 +#if defined (HAVE_GCC_ATOMIC_BUILTINS)
 #define make_atomic_add_body(S) \
diff --git a/utils/mariadb/patches/140-mips-connect-unaligned.patch b/utils/mariadb/patches/140-mips-connect-unaligned.patch
index c2b58778d..501eb53ca 100644
--- a/utils/mariadb/patches/140-mips-connect-unaligned.patch
+++ b/utils/mariadb/patches/140-mips-connect-unaligned.patch
@@ -189,7 +189,7 @@ This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
 CheckType(pv)
 TYPE *lp = ((TYPBLK*)pv)->Typp;
-- for (register int i = k; i < n; i++) // TODO
+- for (int i = k; i < n; i++) // TODO
 - Typp[i] = lp[i];
 + memcpy(Typp + k, lp + k, sizeof(TYPE) * n);
diff --git a/utils/mariadb/patches/170-ppc-remove-glibc-dep.patch b/utils/mariadb/patches/170-ppc-remove-glibc-dep.patch
index 198d5f10c..84637a47d 100644
--- a/utils/mariadb/patches/170-ppc-remove-glibc-dep.patch
+++ b/utils/mariadb/patches/170-ppc-remove-glibc-dep.patch
@@ -27,7 +27,7 @@ directly was the first solution adopted in MariaDB [2].
 --- a/storage/xtradb/include/ut0ut.h
 +++ b/storage/xtradb/include/ut0ut.h
-@@ -85,9 +85,8 @@ private:
+@@ -83,9 +83,8 @@ private:
 the YieldProcessor macro defined in WinNT.h. It is a CPU architecture-
 independent way by using YieldProcessor. */
 # define UT_RELAX_CPU() YieldProcessor()
@@ -39,7 +39,7 @@ directly was the first solution adopted in MariaDB [2].
 # else
 # define UT_RELAX_CPU() ((void)0) /* avoid warning for an empty statement */
 # endif
-@@ -101,9 +100,8 @@ private:
+@@ -99,9 +98,8 @@ private:
 #endif
 # if defined(HAVE_HMT_PRIORITY_INSTRUCTION)
@@ -53,7 +53,7 @@ directly was the first solution adopted in MariaDB [2].
 # define UT_RESUME_PRIORITY_CPU() ((void)0)
 --- a/storage/innobase/include/ut0ut.h
 +++ b/storage/innobase/include/ut0ut.h
-@@ -71,9 +71,8 @@ typedef time_t ib_time_t;
+@@ -68,9 +68,8 @@ Created 1/20/1994 Heikki Tuuri
 the YieldProcessor macro defined in WinNT.h. It is a CPU architecture-
 independent way by using YieldProcessor. */
 # define UT_RELAX_CPU() YieldProcessor()
@@ -65,7 +65,7 @@ directly was the first solution adopted in MariaDB [2].
 #else
 # define UT_RELAX_CPU() do { \
 volatile int32 volatile_var; \
-@@ -91,9 +90,8 @@ typedef time_t ib_time_t;
+@@ -88,9 +87,8 @@ Created 1/20/1994 Heikki Tuuri
 #endif
 #if defined(HAVE_HMT_PRIORITY_INSTRUCTION)
diff --git a/utils/mariadb/patches/180-libedit.patch b/utils/mariadb/patches/180-libedit.patch
index 522792850..edf1ea7f8 100644
--- a/utils/mariadb/patches/180-libedit.patch
+++ b/utils/mariadb/patches/180-libedit.patch
@@ -24,7 +24,7 @@ Date: Sun Dec 9 21:19:24 2018 +0100
 --- a/client/mysql.cc
 +++ b/client/mysql.cc
-@@ -2577,7 +2577,7 @@ C_MODE_END
+@@ -2578,7 +2578,7 @@ C_MODE_END
 if not.
 */
@@ -33,7 +33,7 @@ Date: Sun Dec 9 21:19:24 2018 +0100
 static int fake_magic_space(int, int);
 extern "C" char *no_completion(const char*,int)
 #elif defined(USE_LIBEDIT_INTERFACE)
 @@ -2659,7 +2659,7 @@ static int not_in_history(const char *li
 }
@@ -42,7 +42,7 @@ Date: Sun Dec 9 21:19:24 2018 +0100
 static int fake_magic_space(int, int)
 #else
 static int fake_magic_space(const char *, int)
-@@ -2676,7 +2676,7 @@ static void initialize_readline (char *n
+@@ -2677,7 +2677,7 @@ static void initialize_readline (char *n
 rl_readline_name = name;
 /* Tell the completer that we want a crack first. */
@@ -51,7 +51,7 @@ Date: Sun Dec 9 21:19:24 2018 +0100
 rl_attempted_completion_function= (rl_completion_func_t*)&new_mysql_completion;
 rl_completion_entry_function= (rl_compentry_func_t*)&no_completion;
-@@ -2706,7 +2706,7 @@ static char **new_mysql_completion(const
+@@ -2707,7 +2707,7 @@ static char **new_mysql_completion(const
 int end __attribute__((unused)))
 {
 if (!status.batch && !quick)
diff --git a/utils/mt-st/Makefile b/utils/mt-st/Makefile
index aab5e751e..71bfdcf99 100644
--- a/utils/mt-st/Makefile
+++ b/utils/mt-st/Makefile
@@ -9,14 +9,13 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=mt-st
 PKG_VERSION:=1.1
-PKG_RELEASE:=1
+PKG_RELEASE:=2
-PKG_SOURCE_SUBDIR:=$(PKG_NAME)
-PKG_SOURCE_VERSION:=$(PKG_VERSION)
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_SOURCE_VERSION).tar.gz
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=ftp://ftp.ibiblio.org/pub/Linux/system/backup/
 PKG_HASH:=945cb4f3d9957dabe768f5941a9148b746396836c797b25f020c84319ba8170d
+PKG_MAINTAINER:=Giuseppe Magnotta <giuseppe.magnotta@gmail.com>
 PKG_LICENSE:=GPL-2.0
 include $(INCLUDE_DIR)/package.mk
@@ -26,7 +25,6 @@ define Package/mt-st
 CATEGORY:=Utilities
 TITLE:=Magnetic tape control tools for Linux SCSI tapes
 URL:=http://ftp.ibiblio.org/pub/Linux/system/backup/
- MAINTAINER:=Giuseppe Magnotta <giuseppe.magnotta@gmail.com>
 endef
 define Package/mt-st/description
diff --git a/utils/mt-st/patches/010-sysmacros.patch b/utils/mt-st/patches/010-sysmacros.patch
new file mode 100644
index 000000000..8e4b37378
--- /dev/null
+++ b/utils/mt-st/patches/010-sysmacros.patch
@@ -0,0 +1,10 @@
+--- a/mt.c
++++ b/mt.c
+@@ -21,6 +21,7 @@
+ #include <sys/types.h>
+ #include <sys/ioctl.h>
+ #include <sys/stat.h>
++#include <sys/sysmacros.h>
+ #include <sys/utsname.h>
+
+ #include "mtio.h"
diff --git a/utils/prometheus-node-exporter-lua/Makefile b/utils/prometheus-node-exporter-lua/Makefile
index 83a5c06cf..fcc5b589d 100644
--- a/utils/prometheus-node-exporter-lua/Makefile
+++ b/utils/prometheus-node-exporter-lua/Makefile
@@ -4,7 +4,7 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=prometheus-node-exporter-lua
-PKG_VERSION:=2019.04.12
+PKG_VERSION:=2019.08.14
 PKG_RELEASE:=1
 PKG_MAINTAINER:=Etienne CHAMPETIER <champetier.etienne@gmail.com>
@@ -79,7 +79,7 @@ endef
 define Package/prometheus-node-exporter-lua-openwrt
 $(call Package/prometheus-node-exporter-lua/Default)
 TITLE+= (openwrt collector)
- DEPENDS:=prometheus-node-exporter-lua
+ DEPENDS:=prometheus-node-exporter-lua +libubus-lua
 endef
 define Package/prometheus-node-exporter-lua-ltq-dsl
diff --git a/utils/prometheus-node-exporter-lua/files/usr/lib/lua/prometheus-collectors/openwrt.lua b/utils/prometheus-node-exporter-lua/files/usr/lib/lua/prometheus-collectors/openwrt.lua
index 8b9785a27..8d44792b9 100644
--- a/utils/prometheus-node-exporter-lua/files/usr/lib/lua/prometheus-collectors/openwrt.lua
+++ b/utils/prometheus-node-exporter-lua/files/usr/lib/lua/prometheus-collectors/openwrt.lua
@@ -1,20 +1,20 @@
+local ubus = require "ubus"
+local u = ubus.connect()
+local b = u:call("system", "board", {})
+
 local labels = {
- id = "",
- release = "",
- revision = "",
- model = string.sub(get_contents("/tmp/sysinfo/model"), 1, -2),
- board_name = string.sub(get_contents("/tmp/sysinfo/board_name"), 1, -2)
+ board_name = b.board_name,
+ id = b.release.distribution,
+ model = b.model,
+ release = b.release.version,
+ revision = b.release.revision,
+ system = b.system,
+ target = b.release.target
 }
-for k, v in string.gmatch(get_contents("/etc/openwrt_release"), "(DISTRIB_%w+)='(.-)'\n") do
- if k == "DISTRIB_ID" then
- labels["id"] = v
- elseif k == "DISTRIB_RELEASE" then
- labels["release"] = v
- elseif k == "DISTRIB_REVISION" then
- labels["revision"] = v
- end
-end
+b = nil
+u = nil
+ubus = nil
 local function scrape()
 metric("node_openwrt_info", "gauge", labels, 1)
diff --git a/utils/pservice/Makefile b/utils/pservice/Makefile
new file mode 100644
index 000000000..56a2ac097
--- /dev/null
+++ b/utils/pservice/Makefile
@@ -0,0 +1,28 @@
+# Copyright (C) 2017 Yousong Zhou
+
+include $(TOPDIR)/rules.mk
+
+PKG_NAME:=pservice
+PKG_VERSION:=2017-08-29
+PKG_RELEASE=2
+
+PKG_MAINTAINER:=Yousong Zhou <yszhou4tech@gmail.com>
+
+include $(INCLUDE_DIR)/package.mk
+
+define Package/pservice
+  SECTION:=utils
+  CATEGORY:=Utilities
+  TITLE:=Wrap commands as procd services
+endef
+
+define Build/Compile
+endef
+
+define Package/pservice/install
+	$(INSTALL_DIR) $(1)/usr/bin $(1)/etc/init.d $(1)/etc/config
+	$(INSTALL_BIN) ./files/pservice.init $(1)/etc/init.d/pservice
+	$(INSTALL_DATA) ./files/pservice.config $(1)/etc/config/pservice
+endef
+
+$(eval $(call BuildPackage,pservice))
diff --git a/utils/pservice/README.md b/utils/pservice/README.md
new file mode 100644
index 000000000..85fbd6c42
--- /dev/null
+++ b/utils/pservice/README.md
@@ -0,0 +1,44 @@
+# uci
+
+`disabled`, bool, default `0`
+
+`name`, string, name of the service instance
+
+`command`, file, the service instance executable
+
+`args`, list of args
+
+`stderr`, bool, default `0`, log stderr output of the service instance
+
+`stdout`, bool, default `0`, log stdout output of the service instance
+
+`env`, list of environment variable settings of the form `var=val`
+
+`file`, list of file names. Service instances will be restarted if the content
+of these files has changed on a service reload event.
+
+`respawn_threshold`, uinteger, default `3600`, time in seconds an instance has
+to stay in the running state to be considered a valid run
+
+`respawn_timeout`, uinteger, default `5`, time in seconds to wait before
+starting the instance again after the last crash
+
+`respawn_maxfail`, uinteger, default `5`, maximum number of times the instance
+can crash/fail in a row; procd will not try to bring it up again once this
+limit has been reached
+
+# notes and faq
+
+Initial environment variables presented to service instances may differ from
+what is seen on an interactive terminal. E.g. `HOME=/` may affect how a
+dropbear ssh instance reads `~/.ssh/known_hosts`.
+
+    PATH=/usr/sbin:/usr/bin:/sbin:/bin PWD=/ HOME=/
+
+If the `list args xxx` lines become too long to manage, consider using `/bin/sh`
+as the `command`. It is also worth noting that uci supports multi-line option
+values.
+
+Child processes will keep running when their parent process is killed. This
+should be taken into account when the `command` is `/bin/sh`; it is recommended
+to use `exec` for the last shell command.
diff --git a/utils/pservice/files/pservice.config b/utils/pservice/files/pservice.config
new file mode 100644
index 000000000..0f505b398
--- /dev/null
+++ b/utils/pservice/files/pservice.config
@@ -0,0 +1,24 @@
+config pservice
+	option disabled 1
+	option name 'demo0'
+	option command /bin/sh
+	option respawn_maxfail 0
+	list args -c
+	list args 'env | logger -t $name; exec sleep $time'
+	list env 'v0=0'
+	list env 'v1=val with space'
+	list env 'name=demo0'
+	list env 'time=1799'
+	list file /tmp/sleep.conf
+
+config pservice
+	option disabled 1
+	option name 8021x
+	option command /usr/sbin/wpa_supplicant
+	option stdout 1
+	list args -i
+	list args eth0.1
+	list args -D
+	list args wired
+	list args -c
+	list args /etc/wpa_supplicant-eth0.1.conf
diff --git a/utils/pservice/files/pservice.init b/utils/pservice/files/pservice.init
new file mode 100755
index 000000000..3a3ce022c
--- /dev/null
+++ b/utils/pservice/files/pservice.init
@@ -0,0 +1,63 @@
+#!/bin/sh /etc/rc.common
+# Copyright (C) 2017-2019 Yousong Zhou
+
+START=99
+
+USE_PROCD=1
+
+pservice_list_cb() {
+	local val="$1"; shift
+	local param="$1"; shift
+
+	procd_append_param "$param" "$val"
+}
+
+pservice_instance() {
+	local cfg="$1"
+
+	[ "$disabled" = 0 ] || return 0
+	[ -x "$command" ] || {
+		echo "$command is not executable" >&2
+		return 1
+	}
+
+	procd_open_instance "$name"
+	procd_set_param command "$command"
+	procd_set_param stderr "$stderr"
+	procd_set_param stdout "$stdout"
+	procd_set_param respawn "$respawn_threshold" "$respawn_timeout" "$respawn_maxfail"
+	[ -z "$args" ] || config_list_foreach "$cfg" args pservice_list_cb command
+	if [ -n "$env" ]; then
+		procd_set_param env
+		config_list_foreach "$cfg" env pservice_list_cb env
+	fi
+	if [ -n "$file" ]; then
+		procd_set_param file
+		config_list_foreach "$cfg" file pservice_list_cb file
+	fi
+	procd_close_instance
+}
+
+start_service() {
+	config_load 'pservice'
+	config_foreach validate_pservice_section pservice pservice_instance
+}
+
+service_triggers() {
+	procd_add_validation validate_pservice_section
+}
+
+validate_pservice_section() {
+	uci_load_validate pservice pservice "$1" "$2" \
+		"disabled:bool:0" \
+		"name:string" \
+		"env:regex('^[a-zA-Z_][a-zA-Z0-9_]*=.*$')" \
+		"command:file" \
+		"args:string" \
+		"stderr:bool:0" \
+		"stdout:bool:0" \
+		"respawn_threshold:uinteger:3600" \
+		"respawn_timeout:uinteger:5" \
+		"respawn_maxfail:uinteger:5" \
+		"file:string"
+}
diff --git a/utils/rng-tools/Makefile b/utils/rng-tools/Makefile
index 6f6a8b410..0618c8409 100644
--- a/utils/rng-tools/Makefile
+++ b/utils/rng-tools/Makefile
@@ -8,20 +8,22 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=rng-tools
-PKG_VERSION:=6.6
+PKG_VERSION:=6.7
 PKG_RELEASE:=1
 PKG_SOURCE_PROTO:=git
-PKG_SOURCE_URL:=https://github.com/nhorman/rng-tools.git
-PKG_SOURCE_VERSION:=4ebc21d6f387bb7b4b3f6badc429e27b21c0a6ee
-PKG_MIRROR_HASH:=d942283b7482337d40a4933f7b24a5d1361518dacf9c87928f5ea06d492e95b0
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.xz
-PKG_LICENSE:=GPLv2
+PKG_SOURCE_URL:=https://github.com/nhorman/rng-tools
+PKG_SOURCE_VERSION:=v$(PKG_VERSION)
+PKG_MIRROR_HASH:=05cb68b8600025f362ea0875f5966b60f8195f91ed89b431996a48cd88b1e5b0
+
 PKG_MAINTAINER:=Nathaniel Wesley Filardo <nwfilardo@gmail.com>
+PKG_LICENSE:=GPL-2.0-or-later
+PKG_LICENSE_FILES:=COPYING
 PKG_FIXUP:=autoreconf
-
-PKG_BUILD_DEPENDS:=USE_UCLIBC:argp-standalone USE_MUSL:argp-standalone
+PKG_INSTALL:=1
+PKG_BUILD_PARALLEL:=1
+PKG_BUILD_DEPENDS:=!USE_GLIBC:argp-standalone
 include $(INCLUDE_DIR)/package.mk
@@ -34,24 +36,19 @@ define Package/rng-tools
 endef
 define Package/rng-tools/description
-Daemon for adding entropy to kernel entropy pool. By default it uses
-/dev/urandom as the source but the init script can be modified
-to use a hardware source like /dev/hwrng if present
+ Daemon for adding entropy to kernel entropy pool. By default it uses
+ /dev/urandom as the source but the init script can be modified
+ to use a hardware source like /dev/hwrng if present
 endef
-ifdef CONFIG_USE_UCLIBC
-CONFIGURE_VARS += \
- LIBS="-largp"
-endif
-
-ifdef CONFIG_USE_MUSL
-CONFIGURE_VARS += \
- LIBS="-largp"
-endif
-
 CONFIGURE_ARGS += \
 --without-libgcrypt \
- --without-nistbeacon
+ --without-nistbeacon \
+ --without-pkcs11
+
+ifndef CONFIG_USE_GLIBC
+ CONFIGURE_VARS += LIBS="-largp"
+endif
 define Build/Prepare
 $(call Build/Prepare/Default)
@@ -64,9 +61,9 @@ define Package/rng-tools/install
 $(INSTALL_DIR) $(1)/etc/uci-defaults
 $(INSTALL_BIN) ./files/rngd.uci_defaults $(1)/etc/uci-defaults/rngd
 $(INSTALL_DIR) $(1)/usr/bin
- $(INSTALL_BIN) $(PKG_BUILD_DIR)/rngtest $(1)/usr/bin/
+ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/bin/rngtest $(1)/usr/bin/
 $(INSTALL_DIR) $(1)/sbin
- $(INSTALL_BIN) $(PKG_BUILD_DIR)/rngd $(1)/sbin/
+ $(INSTALL_BIN) $(PKG_INSTALL_DIR)/usr/sbin/rngd $(1)/sbin/
 endef
 $(eval $(call BuildPackage,rng-tools))
diff --git a/utils/rtklib/Makefile b/utils/rtklib/Makefile
index f179f2669..571985235 100644
--- a/utils/rtklib/Makefile
+++ b/utils/rtklib/Makefile
@@ -1,4 +1,4 @@
-#
+#
 # Copyright (C) 2014-2016 OpenWrt.org
 #
 # This is free software, licensed under the GNU General Public License v2.
@@ -7,18 +7,18 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=rtklib
-PKG_VERSION:=2.4.3_b24
-PKG_RELEASE:=$(PKG_SOURCE_VERSION)
+PKG_VERSION:=2.4.3_b32
+PKG_RELEASE:=1
 PKG_SOURCE_PROTO:=git
-PKG_SOURCE_URL:=git://github.com/tomojitakasu/RTKLIB.git
-PKG_SOURCE_VERSION:=1cec90a9ffa424908ad1a4ca3d52f33f9b94d1f7
-PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION)-$(PKG_SOURCE_VERSION).tar.gz
-PKG_MIRROR_HASH:=edda6c29ba3d2f5401145a1497e88646fa0c13afc31ade7bdd982bd8e8081c6a
-PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
-PKG_LICENSE:=BSD-2-Clause
+PKG_SOURCE_URL:=https://github.com/tomojitakasu/RTKLIB
+PKG_SOURCE_VERSION:=6e5ddadb737c54d4a43c43feeeb4e244c51b4286
+PKG_MIRROR_HASH:=b6ada49b6667a98e935055e718bf9a5712030cddc1694d1be7c0ab0e98bdc7b8
 PKG_MAINTAINER:=Nuno Goncalves <nunojpg@gmail.com>
+PKG_LICENSE:=BSD-2-Clause
+
+PKG_BUILD_PARALLEL:=0
 include $(INCLUDE_DIR)/package.mk
diff --git a/utils/rtklib/patches/010-musl.patch b/utils/rtklib/patches/010-musl.patch
new file mode 100644
index 000000000..14a405fed
--- /dev/null
+++ b/utils/rtklib/patches/010-musl.patch
@@ -0,0 +1,12 @@
+--- a/src/stream.c
++++ b/src/stream.c
+@@ -81,6 +81,9 @@
+ #include <netinet/in.h>
+ #include <netinet/tcp.h>
+ #include <arpa/inet.h>
++#ifndef _POSIX_SOURCE
++#define _POSIX_SOURCE
++#endif
+ #include <netdb.h>
+ #endif
+
diff --git a/utils/rtklib/patches/020-no-gfortran.patch b/utils/rtklib/patches/020-no-gfortran.patch
new file mode 100644
index 000000000..3bbd2fbe7
--- /dev/null
+++ b/utils/rtklib/patches/020-no-gfortran.patch
@@ -0,0 +1,21 @@
+--- a/app/rnx2rtkp/gcc/makefile
++++ b/app/rnx2rtkp/gcc/makefile
+@@ -2,14 +2,14 @@
+ BINDIR = /usr/local/bin
+ SRC = ../../../src
+
+-#OPTS = -DTRACE -DENAGLO -DENAQZS -DENAGAL -DENACMP -DENAIRN -DNFREQ=3
+-OPTS = -DTRACE -DENAGLO -DENAQZS -DENAGAL -DENACMP -DENAIRN -DNFREQ=3 -DIERS_MODEL
++OPTS = -DTRACE -DENAGLO -DENAQZS -DENAGAL -DENACMP -DENAIRN -DNFREQ=3
++#OPTS = -DTRACE -DENAGLO -DENAQZS -DENAGAL -DENACMP -DENAIRN -DNFREQ=3 -DIERS_MODEL
+ #OPTS = -DENAGLO -DENAQZS -DENAGAL -DENACMP -DNFREQ=2
+
+ # for no lapack
+ CFLAGS = -Wall -O3 -ansi -pedantic -Wno-unused-but-set-variable -I$(SRC) $(OPTS) -g
+-#LDLIBS = -lm -lrt
+-LDLIBS = ../../../lib/iers/gcc/iers.a -lgfortran -lm -lrt
++LDLIBS = -lm -lrt
++#LDLIBS = ../../../lib/iers/gcc/iers.a -lgfortran -lm -lrt
+
+ #CFLAGS = -Wall -O3 -ansi -pedantic -Wno-unused-but-set-variable -I$(SRC) -DLAPACK $(OPTS)
+ #LDLIBS = -lm -lrt -llapack -lblas
diff --git a/utils/slide-switch/Makefile b/utils/slide-switch/Makefile
index c4344ecb3..e3ac08369 100644
--- a/utils/slide-switch/Makefile
+++ b/utils/slide-switch/Makefile
@@ -8,15 +8,13 @@ include $(TOPDIR)/rules.mk
 PKG_NAME:=slide-switch
-PKG_VERSION:=0.9.4
+PKG_VERSION:=0.9.5
 PKG_RELEASE:=1
 PKG_SOURCE_PROTO:=git
 PKG_SOURCE_URL:=https://github.com/jefferyto/openwrt-slide-switch.git
-PKG_SOURCE_VERSION:=0.9.4
-PKG_SOURCE_SUBDIR:=$(PKG_NAME)-$(PKG_VERSION)
-PKG_SOURCE:=$(PKG_SOURCE_SUBDIR).tar.xz
-PKG_MIRROR_HASH:=52a93506c994b1babf174aec8ac8aebbf94f27263125d144f6d86db001dd24d6
+PKG_MIRROR_HASH:=ac61aea3ce620364285de5525635999aa8b463c4070da6bce134278ff92a433c
+PKG_SOURCE_VERSION:=$(PKG_VERSION)
 PKG_BUILD_PARALLEL:=1
 PKG_FIXUP:=autoreconf
diff --git a/utils/tessdata/Makefile b/utils/tessdata/Makefile
index c0fa83d52..6f12c776c 100644
--- a/utils/tessdata/Makefile
+++ b/utils/tessdata/Makefile
@@ -36,7 +36,6 @@ endef
 define Package/tesseract-data-default
- SUBMENU:=Tesseract
 SECTION:=utils
 CATEGORY:=Utilities
 DEPENDS:=tesseract
@@ -44,7 +43,7 @@ endef
 define generate-tesseract-data-package
 define Package/tesseract-data-$(1)
- TITLE:=Tesseract training data for $(1) language
+ TITLE:=Training data for $(1) language
 $(call Package/tesseract-data-default)
 endef
diff --git a/utils/ykpers/Makefile b/utils/ykpers/Makefile
index c2bd1e7c1..1c883af64 100644
--- a/utils/ykpers/Makefile
+++ b/utils/ykpers/Makefile
@@ -1,23 +1,21 @@
 include $(TOPDIR)/rules.mk
 PKG_NAME:=ykpers
-PKG_VERSION:=1.19.0
+PKG_VERSION:=1.20.0
 PKG_RELEASE:=1
-PKG_SOURCE:=ykpers-$(PKG_VERSION).tar.gz
+PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://developers.yubico.com/yubikey-personalization/Releases
-PKG_HASH:=2bc8afa16d495a486582bad916d16de1f67c0cce9bb0a35c3123376c2d609480
+PKG_HASH:=0ec84d0ea862f45a7d85a1a3afe5e60b8da42df211bb7d27a50f486e31a79b93
+
 PKG_MAINTAINER:=Stuart B. Wilkins <stuwilkins@mac.com>
-PKG_LICENSE_FILES:=COPYING
 PKG_LICENSE:=BSD-2-Clause
-
-PKG_BUILD_DIR:=$(BUILD_DIR)/ykpers-$(PKG_VERSION)
-PKG_BUILD_DEPENDS:=libyubikey
+PKG_LICENSE_FILES:=COPYING
 include $(INCLUDE_DIR)/package.mk
 define Package/ykpers
- SECTION:=utils
+ SECTION:=utils
 CATEGORY:=Utilities
 TITLE:=The Yuibco personalization package
 URL:=https://developers.yubico.com/yubikey-personalization/
@@ -25,7 +23,7 @@ define Package/ykpers
 endef
 define Package/ykpers/description
- The YubiKey Personalization package contains a library and command
+ The YubiKey Personalization package contains a library and command
 line tool used to personalize (i.e., set a AES key) YubiKeys.
 endef
@@ -33,7 +31,6 @@ CONFIGURE_ARGS += \
 --enable-shared \
 --disable-static
-
 define Build/InstallDev
 $(INSTALL_DIR) $(STAGING_DIR)/usr/include
 $(CP) $(PKG_BUILD_DIR)/ykcore/*.h $(STAGING_DIR)/usr/include
diff --git a/utils/zstd/Makefile b/utils/zstd/Makefile
index 2f3048e24..03931dbe4 100644
--- a/utils/zstd/Makefile
+++ b/utils/zstd/Makefile
@@ -1,12 +1,12 @@
 include $(TOPDIR)/rules.mk
 PKG_NAME:=zstd
-PKG_VERSION:=1.4.0
-PKG_RELEASE:=4
+PKG_VERSION:=1.4.2
+PKG_RELEASE:=1
 PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
 PKG_SOURCE_URL:=https://codeload.github.com/facebook/zstd/tar.gz/v$(PKG_VERSION)?
-PKG_HASH:=63be339137d2b683c6d19a9e34f4fb684790e864fee13c7dd40e197a64c705c1
+PKG_HASH:=7a6e1dad34054b35e2e847eb3289be8820a5d378228802239852f913c6dcf6a7
 PKG_MAINTAINER:=Amol Bhave <ambhave@fb.com>
 PKG_LICENSE:=GPL-2.0-or-later
@@ -21,13 +21,6 @@ include $(INCLUDE_DIR)/package.mk
 include $(INCLUDE_DIR)/host-build.mk
 include $(INCLUDE_DIR)/cmake.mk
-ifeq ($(CONFIG_ZSTD_OPTIMIZE_O3),y)
- TARGET_CFLAGS := $(filter-out -O%,$(TARGET_CFLAGS))
- TARGET_CFLAGS += -O3
- TARGET_CXXFLAGS := $(filter-out -O%,$(TARGET_CXXFLAGS))
- TARGET_CXXFLAGS += -O3
-endif
-
 define Package/zstd/Default
 SUBMENU:=Compression
 URL:=https://github.com/facebook/zstd
@@ -38,6 +31,7 @@ $(call Package/zstd/Default)
 SECTION:=libs
 CATEGORY:=Libraries
 TITLE:=zstd library.
+ MENU:=1
 endef
 define Package/libzstd/description
@@ -48,6 +42,7 @@ endef
 define Package/libzstd/config
 config ZSTD_OPTIMIZE_O3
 bool "Use all optimizations (-O3)"
+ depends on PACKAGE_libzstd
 default y
 help
 This enables additional optmizations using the -O3 compilation flag.
@@ -66,6 +61,13 @@ define Package/zstd/description
 This package provides the zstd binaries.
 endef
+ifeq ($(CONFIG_ZSTD_OPTIMIZE_O3),y)
+TARGET_CFLAGS:= $(filter-out -O%,$(TARGET_CFLAGS)) -O3
+endif
+
+TARGET_CFLAGS += -flto
+TARGET_LDFLAGS += -Wl,--as-needed
+
 define Package/libzstd/install
 $(INSTALL_DIR) $(1)/usr/lib
 $(CP) $(PKG_INSTALL_DIR)/usr/lib/libzstd.so* $(1)/usr/lib/
diff --git a/utils/zstd/patches/010-uClibc-ng.patch b/utils/zstd/patches/010-uClibc-ng.patch
deleted file mode 100644
index 552286bec..000000000
--- a/utils/zstd/patches/010-uClibc-ng.patch
+++ /dev/null
@@ -1,11 +0,0 @@
---- a/programs/fileio.c
-+++ b/programs/fileio.c
-@@ -175,7 +175,7 @@ static void clearHandler(void)
-
- #if !defined(BACKTRACE_ENABLE)
- /* automatic detector : backtrace enabled by default on linux+glibc and osx */
--# if (defined(__linux__) && defined(__GLIBC__)) \
-+# if (defined(__linux__) && (defined(__GLIBC__) && !defined(__UCLIBC__))) \
- || (defined(__APPLE__) && defined(__MACH__))
- # define BACKTRACE_ENABLE 1
- # else
diff --git a/utils/zstd/patches/010-utime.patch b/utils/zstd/patches/010-utime.patch
new file mode 100644
index 000000000..a67e696ac
--- /dev/null
+++ b/utils/zstd/patches/010-utime.patch
@@ -0,0 +1,97 @@
+From 245a69c0f5784ba89c28301263bcfd5785ebe0ea Mon Sep 17 00:00:00 2001
+From: Rosen Penev <rosenp@gmail.com>
+Date: Tue, 30 Jul 2019 17:17:07 -0700
+Subject: [PATCH] zstd: Don't use utime on Linux
+
+utime is deprecated by POSIX 2008 and optionally not available with
+uClibc-ng.
+
+Got rid of a few useless headers in timefn.h.
+
+Signed-off-by: Rosen Penev <rosenp@gmail.com>
+---
+ programs/platform.h | 2 +-
+ programs/timefn.h | 6 ------
+ programs/util.c | 10 ++++++++++
+ programs/util.h | 5 +++--
+ 4 files changed, 14 insertions(+), 9 deletions(-)
+
+diff --git a/programs/platform.h b/programs/platform.h
+index 38ded8727..5934e59cf 100644
+--- a/programs/platform.h
++++ b/programs/platform.h
+@@ -92,7 +92,7 @@ extern "C" {
+
+ # if defined(__linux__) || defined(__linux)
+ # ifndef _POSIX_C_SOURCE
+-# define _POSIX_C_SOURCE 200112L /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
++# define _POSIX_C_SOURCE 200809L /* feature test macro : https://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html */
+ # endif
+ # endif
+ # include <unistd.h> /* declares _POSIX_VERSION */
+diff --git a/programs/timefn.h b/programs/timefn.h
+index d1ddd31b1..2db3765b9 100644
+--- a/programs/timefn.h
++++ b/programs/timefn.h
+@@ -19,12 +19,6 @@ extern "C" {
+ /*-****************************************
+ * Dependencies
+ ******************************************/
+-#include <sys/types.h> /* utime */
+-#if defined(_MSC_VER)
+-# include <sys/utime.h> /* utime */
+-#else
+-# include <utime.h> /* utime */
+-#endif
+ #include <time.h> /* clock_t, clock, CLOCKS_PER_SEC */
+
+
+diff --git a/programs/util.c b/programs/util.c
+index fb77d1783..3a2e9e28f 100644
+--- a/programs/util.c
++++ b/programs/util.c
+@@ -54,14 +54,24 @@ int UTIL_getFileStat(const char* infilename, stat_t *statbuf)
+ int UTIL_setFileStat(const char *filename, stat_t *statbuf)
+ {
+ int res = 0;
++#if defined(_WIN32)
+ struct utimbuf timebuf;
++#else
++ struct timespec timebuf[2];
++#endif
+
+ if (!UTIL_isRegularFile(filename))
+ return -1;
+
++#if defined(_WIN32)
+ timebuf.actime = time(NULL);
+ timebuf.modtime = statbuf->st_mtime;
+ res += utime(filename, &timebuf); /* set access and modification times */
++#else
++ timebuf[0].tv_sec = time(NULL);
++ timebuf[1].tv_sec = statbuf->st_mtime;
++ res += utimensat(AT_FDCWD, filename, timebuf, 0); /* set access and modification times */
++#endif
+
+ #if !defined(_WIN32)
+ res += chown(filename, statbuf->st_uid, statbuf->st_gid); /* Copy ownership */
+diff --git a/programs/util.h b/programs/util.h
+index d6e5bb550..71d4c7c77 100644
+--- a/programs/util.h
++++ b/programs/util.h
+@@ -25,12 +25,13 @@ extern "C" {
+ #include <stdio.h> /* fprintf */
+ #include <sys/types.h> /* stat, utime */
+ #include <sys/stat.h> /* stat, chmod */
+-#if defined(_MSC_VER)
++#if defined(_WIN32)
+ # include <sys/utime.h> /* utime */
+ # include <io.h> /* _chmod */
+ #else
++# include <fcntl.h> /* AT_FDCWD */
++# include <sys/stat.h> /* utimensat */
+ # include <unistd.h> /* chown, stat */
+-# include <utime.h> /* utime */
+ #endif
+ #include <time.h> /* clock_t, clock, CLOCKS_PER_SEC, nanosleep */
+ #include "mem.h" /* U32, U64 */
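
The commit message above describes replacing the deprecated `utime()` call with `utimensat()`. For readers unfamiliar with that API, the following standalone C sketch (not taken from zstd; the helper and file names are illustrative only) shows the same pattern the patch switches to: access time set to "now", modification time copied from a `stat()` result.

    #define _POSIX_C_SOURCE 200809L   /* utimensat() is POSIX.1-2008 */

    #include <fcntl.h>     /* AT_FDCWD */
    #include <sys/stat.h>  /* stat(), utimensat() */
    #include <sys/types.h>
    #include <time.h>      /* time() */

    /* Copy the modification time of `from` onto `to`, setting the access
     * time to "now" - mirroring what the patch does in UTIL_setFileStat(). */
    static int copy_mtime(const char *from, const char *to)
    {
        struct stat st;
        struct timespec times[2];

        if (stat(from, &st) != 0)
            return -1;

        times[0].tv_sec = time(NULL);   /* [0] = access time */
        times[0].tv_nsec = 0;
        times[1].tv_sec = st.st_mtime;  /* [1] = modification time */
        times[1].tv_nsec = 0;

        return utimensat(AT_FDCWD, to, times, 0);
    }

    int main(void)
    {
        /* "old.txt" and "new.txt" are placeholder file names. */
        return copy_mtime("old.txt", "new.txt") == 0 ? 0 : 1;
    }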