chore: merge release/stone with release/trift.
This commit is contained in:
commit
65365281eb
|
@ -41,3 +41,7 @@ cmake-build-*/
|
|||
|
||||
# Doxygen doc files
|
||||
_doc/
|
||||
|
||||
# gRPC auto-generated C++ source files
|
||||
*.pb.cc
|
||||
*.pb.h
|
||||
|
|
370
.gitlab-ci.yml
370
.gitlab-ci.yml
|
@ -30,13 +30,6 @@ stages:
|
|||
- test
|
||||
- build
|
||||
|
||||
.rules-branch-and-MR-always:
|
||||
rules:
|
||||
- if: $CI_COMMIT_BRANCH || $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
when: always
|
||||
allow_failure: false
|
||||
- when: never
|
||||
|
||||
.rules-branch-and-MR-manual:
|
||||
rules:
|
||||
- if: $CI_COMMIT_BRANCH || $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
|
@ -44,16 +37,6 @@ stages:
|
|||
allow_failure: true
|
||||
- when: never
|
||||
|
||||
.rules-branch-manual-MR-always:
|
||||
rules:
|
||||
- if: $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
when: always
|
||||
allow_failure: false
|
||||
- if: $CI_COMMIT_BRANCH
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- when: never
|
||||
|
||||
.rules-branch-manual-MR-and-devel-always:
|
||||
rules:
|
||||
- if: $CI_COMMIT_BRANCH == "devel" || $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
|
@ -64,173 +47,10 @@ stages:
|
|||
allow_failure: true
|
||||
- when: never
|
||||
|
||||
.after-script-code-coverage:
|
||||
after_script:
|
||||
- go get github.com/boumenot/gocover-cobertura
|
||||
- go run github.com/boumenot/gocover-cobertura < /tmp/coverage.out > coverage.xml
|
||||
- "go tool cover -func=/tmp/coverage.out | grep total:"
|
||||
coverage: '/total:.*\(statements\).*\d+\.\d+%/'
|
||||
artifacts:
|
||||
reports:
|
||||
coverage_report:
|
||||
coverage_format: cobertura
|
||||
path: coverage.xml
|
||||
|
||||
# Stage: TEST
|
||||
|
||||
lint:
|
||||
stage: test
|
||||
extends:
|
||||
- .rules-branch-and-MR-always
|
||||
script:
|
||||
- make lint
|
||||
tags:
|
||||
- medium
|
||||
|
||||
|
||||
.test-base:
|
||||
stage: test
|
||||
script:
|
||||
- make test
|
||||
|
||||
test-linux:
|
||||
extends:
|
||||
- .test-base
|
||||
- .rules-branch-manual-MR-and-devel-always
|
||||
- .after-script-code-coverage
|
||||
tags:
|
||||
- large
|
||||
|
||||
test-linux-race:
|
||||
extends:
|
||||
- test-linux
|
||||
- .rules-branch-and-MR-manual
|
||||
script:
|
||||
- make test-race
|
||||
|
||||
test-integration:
|
||||
extends:
|
||||
- test-linux
|
||||
script:
|
||||
- make test-integration
|
||||
tags:
|
||||
- large
|
||||
|
||||
test-integration-race:
|
||||
extends:
|
||||
- test-integration
|
||||
- .rules-branch-and-MR-manual
|
||||
script:
|
||||
- make test-integration-race
|
||||
|
||||
|
||||
.windows-base:
|
||||
before_script:
|
||||
- export GOROOT=/c/Go1.20
|
||||
- export PATH=$GOROOT/bin:$PATH
|
||||
- export GOARCH=amd64
|
||||
- export GOPATH=~/go1.20
|
||||
- export GO111MODULE=on
|
||||
- export PATH=$GOPATH/bin:$PATH
|
||||
- export MSYSTEM=
|
||||
tags:
|
||||
- windows-bridge
|
||||
|
||||
#test-windows:
|
||||
# extends:
|
||||
# - .rules-branch-manual-MR-always
|
||||
# - .windows-base
|
||||
# stage: test
|
||||
# script:
|
||||
# - make test
|
||||
|
||||
# Stage: BUILD
|
||||
|
||||
.build-base:
|
||||
stage: build
|
||||
needs: ["lint"]
|
||||
rules:
|
||||
# GODT-1833: use `=~ /qa/` after mac and windows runners are fixed
|
||||
- if: $CI_JOB_NAME =~ /build-linux-qa/ && $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
when: always
|
||||
allow_failure: false
|
||||
- if: $CI_COMMIT_BRANCH || $CI_PIPELINE_SOURCE == "merge_request_event"
|
||||
when: manual
|
||||
allow_failure: true
|
||||
- when: never
|
||||
script:
|
||||
- make build
|
||||
- git diff && git diff-index --quiet HEAD
|
||||
- make vault-editor
|
||||
artifacts:
|
||||
expire_in: 1 day
|
||||
when: always
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_SHORT_SHA"
|
||||
paths:
|
||||
- bridge_*.tgz
|
||||
- vault-editor
|
||||
|
||||
|
||||
.linux-build-setup:
|
||||
image: gitlab.protontech.ch:4567/go/bridge-internal:build-go1.20-qt6.3.2
|
||||
variables:
|
||||
VCPKG_DEFAULT_BINARY_CACHE: ${CI_PROJECT_DIR}/.cache
|
||||
cache:
|
||||
key: linux-vcpkg
|
||||
paths:
|
||||
- .cache
|
||||
when: 'always'
|
||||
before_script:
|
||||
- mkdir -p .cache/bin
|
||||
- export PATH=$(pwd)/.cache/bin:$PATH
|
||||
- export GOPATH="$CI_PROJECT_DIR/.cache"
|
||||
- export PATH=$PATH:$QT6DIR/bin
|
||||
- $(git config --global -l | grep -o 'url.*gitlab.protontech.ch.*insteadof' | xargs -L 1 git config --global --unset &> /dev/null) || echo "nothing to remove"
|
||||
- git config --global url.https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}.insteadOf https://${CI_SERVER_HOST}
|
||||
tags:
|
||||
- large
|
||||
|
||||
build-linux:
|
||||
extends:
|
||||
- .build-base
|
||||
- .linux-build-setup
|
||||
|
||||
build-linux-qa:
|
||||
extends:
|
||||
- build-linux
|
||||
variables:
|
||||
BUILD_TAGS: "build_qa"
|
||||
|
||||
|
||||
.darwin-build-setup:
|
||||
before_script:
|
||||
- export PATH=/usr/local/bin:$PATH
|
||||
- export PATH=/usr/local/opt/git/bin:$PATH
|
||||
- export PATH=/usr/local/opt/make/libexec/gnubin:$PATH
|
||||
- export PATH=/usr/local/opt/go@1.13/bin:$PATH
|
||||
- export PATH=/usr/local/opt/gnu-sed/libexec/gnubin:$PATH
|
||||
- export GOPATH=~/go1.20
|
||||
- export PATH=$GOPATH/bin:$PATH
|
||||
- export CGO_CPPFLAGS='-Wno-error -Wno-nullability-completeness -Wno-expansion-to-defined -Wno-builtin-requires-header'
|
||||
- $(git config --global -l | grep -o 'url.*gitlab.protontech.ch.*insteadof' | xargs -L 1 git config --global --unset &> /dev/null) || echo "nothing to remove"
|
||||
- git config --global url.https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}.insteadOf https://${CI_SERVER_HOST}
|
||||
cache: {}
|
||||
tags:
|
||||
- macOS
|
||||
|
||||
build-darwin:
|
||||
extends:
|
||||
- .build-base
|
||||
- .darwin-build-setup
|
||||
|
||||
build-darwin-qa:
|
||||
extends:
|
||||
- build-darwin
|
||||
variables:
|
||||
BUILD_TAGS: "build_qa"
|
||||
|
||||
.windows-build-setup:
|
||||
# ENV
|
||||
.env-windows:
|
||||
before_script:
|
||||
- export BRIDGE_SYNC_FORCE_MINIMUM_SPEC=1
|
||||
- export GOROOT=/c/Go1.20/
|
||||
- export PATH=$GOROOT/bin:$PATH
|
||||
- export GOARCH=amd64
|
||||
|
@ -249,15 +69,177 @@ build-darwin-qa:
|
|||
tags:
|
||||
- windows-bridge
|
||||
|
||||
#build-windows:
|
||||
# extends:
|
||||
# - .build-base
|
||||
# - .windows-build-setup
|
||||
#
|
||||
##build-windows-qa:
|
||||
# extends:
|
||||
# - build-windows
|
||||
# variables:
|
||||
# BUILD_TAGS: "build_qa"
|
||||
#
|
||||
.env-darwin:
|
||||
before_script:
|
||||
- export BRIDGE_SYNC_FORCE_MINIMUM_SPEC=1
|
||||
- export PATH=/usr/local/bin:$PATH
|
||||
- export PATH=/usr/local/opt/git/bin:$PATH
|
||||
- export PATH=/usr/local/opt/make/libexec/gnubin:$PATH
|
||||
- export PATH=/usr/local/opt/gnu-sed/libexec/gnubin:$PATH
|
||||
- export GOROOT=~/local/opt/go@1.20
|
||||
- export PATH="${GOROOT}/bin:$PATH"
|
||||
- export GOPATH=~/go1.20
|
||||
- export PATH="${GOPATH}/bin:$PATH"
|
||||
- export QT6DIR=/opt/Qt/6.3.2/macos
|
||||
- export PATH="${QT6DIR}/bin:$PATH"
|
||||
- uname -a
|
||||
cache: {}
|
||||
tags:
|
||||
- macos-m1-bridge
|
||||
|
||||
.env-linux-build:
|
||||
image: gitlab.protontech.ch:4567/go/bridge-internal:build-go1.20-qt6.3.2
|
||||
variables:
|
||||
VCPKG_DEFAULT_BINARY_CACHE: ${CI_PROJECT_DIR}/.cache
|
||||
cache:
|
||||
key: linux-vcpkg
|
||||
paths:
|
||||
- .cache
|
||||
when: 'always'
|
||||
before_script:
|
||||
- mkdir -p .cache/bin
|
||||
- export BRIDGE_SYNC_FORCE_MINIMUM_SPEC=1
|
||||
- export PATH=$(pwd)/.cache/bin:$PATH
|
||||
- export GOPATH="$CI_PROJECT_DIR/.cache"
|
||||
- export PATH=$PATH:$QT6DIR/bin
|
||||
- $(git config --global -l | grep -o 'url.*gitlab.protontech.ch.*insteadof' | xargs -L 1 git config --global --unset &> /dev/null) || echo "nothing to remove"
|
||||
- git config --global url.https://gitlab-ci-token:${CI_JOB_TOKEN}@${CI_SERVER_HOST}.insteadOf https://${CI_SERVER_HOST}
|
||||
tags:
|
||||
- large
|
||||
|
||||
# Stage: TEST
|
||||
|
||||
lint:
|
||||
stage: test
|
||||
extends:
|
||||
- .rules-branch-manual-MR-and-devel-always
|
||||
script:
|
||||
- make lint
|
||||
tags:
|
||||
- medium
|
||||
|
||||
.script-test:
|
||||
stage: test
|
||||
extends:
|
||||
- .rules-branch-manual-MR-and-devel-always
|
||||
script:
|
||||
- make test
|
||||
artifacts:
|
||||
paths:
|
||||
- coverage/**
|
||||
|
||||
test-linux:
|
||||
extends:
|
||||
- .script-test
|
||||
tags:
|
||||
- large
|
||||
|
||||
test-linux-race:
|
||||
extends:
|
||||
- test-linux
|
||||
- .rules-branch-and-MR-manual
|
||||
script:
|
||||
- make test-race
|
||||
|
||||
test-integration:
|
||||
extends:
|
||||
- test-linux
|
||||
script:
|
||||
- make test-integration
|
||||
|
||||
test-integration-race:
|
||||
extends:
|
||||
- test-integration
|
||||
- .rules-branch-and-MR-manual
|
||||
script:
|
||||
- make test-integration-race
|
||||
|
||||
test-windows:
|
||||
extends:
|
||||
- .env-windows
|
||||
- .script-test
|
||||
- .rules-branch-and-MR-manual
|
||||
|
||||
test-darwin:
|
||||
extends:
|
||||
- .env-darwin
|
||||
- .script-test
|
||||
|
||||
test-coverage:
|
||||
stage: test
|
||||
extends:
|
||||
- .rules-branch-manual-MR-and-devel-always
|
||||
script:
|
||||
- ./utils/coverage.sh
|
||||
coverage: '/total:.*\(statements\).*\d+\.\d+%/'
|
||||
needs:
|
||||
- test-linux
|
||||
#- test-windows
|
||||
- test-darwin
|
||||
- test-integration
|
||||
tags:
|
||||
- small
|
||||
artifacts:
|
||||
paths:
|
||||
- coverage*
|
||||
- coverage/**
|
||||
when: 'always'
|
||||
reports:
|
||||
coverage_report:
|
||||
coverage_format: cobertura
|
||||
path: coverage.xml
|
||||
|
||||
# Stage: BUILD
|
||||
|
||||
.script-build:
|
||||
stage: build
|
||||
needs: ["lint"]
|
||||
extends:
|
||||
- .rules-branch-and-MR-manual
|
||||
script:
|
||||
- make build
|
||||
- git diff && git diff-index --quiet HEAD
|
||||
- make vault-editor
|
||||
artifacts:
|
||||
expire_in: 1 day
|
||||
when: always
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_SHORT_SHA"
|
||||
paths:
|
||||
- bridge_*.tgz
|
||||
- vault-editor
|
||||
|
||||
build-linux:
|
||||
extends:
|
||||
- .script-build
|
||||
- .env-linux-build
|
||||
|
||||
build-linux-qa:
|
||||
extends:
|
||||
- build-linux
|
||||
- .rules-branch-manual-MR-and-devel-always
|
||||
variables:
|
||||
BUILD_TAGS: "build_qa"
|
||||
|
||||
build-darwin:
|
||||
extends:
|
||||
- .script-build
|
||||
- .env-darwin
|
||||
|
||||
build-darwin-qa:
|
||||
extends:
|
||||
- build-darwin
|
||||
variables:
|
||||
BUILD_TAGS: "build_qa"
|
||||
|
||||
build-windows:
|
||||
extends:
|
||||
- .script-build
|
||||
- .env-windows
|
||||
|
||||
build-windows-qa:
|
||||
extends:
|
||||
- build-windows
|
||||
variables:
|
||||
BUILD_TAGS: "build_qa"
|
||||
|
||||
# TODO: PUT BACK ALL THE JOBS! JUST DID THIS FOR NOW TO GET CI WORKING AGAIN...
|
||||
|
|
|
@ -36,6 +36,14 @@ issues:
|
|||
- gosec
|
||||
- goconst
|
||||
- dogsled
|
||||
- path: utils/smtp-send
|
||||
linters:
|
||||
- dupl
|
||||
- gochecknoglobals
|
||||
- gochecknoinits
|
||||
- gosec
|
||||
- goconst
|
||||
- dogsled
|
||||
|
||||
linters-settings:
|
||||
godox:
|
||||
|
|
|
@ -58,7 +58,7 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||
* [testify](https://github.com/stretchr/testify) available under [license](https://github.com/stretchr/testify/blob/master/LICENSE)
|
||||
* [cli](https://github.com/urfave/cli/v2) available under [license](https://github.com/urfave/cli/v2/blob/master/LICENSE)
|
||||
* [msgpack](https://github.com/vmihailenco/msgpack/v5) available under [license](https://github.com/vmihailenco/msgpack/v5/blob/master/LICENSE)
|
||||
* [goleak](https://go.uber.org/goleak)
|
||||
* [goleak](https://go.uber.org/goleak) available under [license](https://pkg.go.dev/go.uber.org/goleak?tab=licenses)
|
||||
* [exp](https://golang.org/x/exp) available under [license](https://cs.opensource.google/go/x/exp/+/master:LICENSE)
|
||||
* [net](https://golang.org/x/net) available under [license](https://cs.opensource.google/go/x/net/+/master:LICENSE)
|
||||
* [sys](https://golang.org/x/sys) available under [license](https://cs.opensource.google/go/x/sys/+/master:LICENSE)
|
||||
|
@ -66,16 +66,12 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||
* [grpc](https://google.golang.org/grpc) available under [license](https://github.com/grpc/grpc-go/blob/master/LICENSE)
|
||||
* [protobuf](https://google.golang.org/protobuf) available under [license](https://github.com/protocolbuffers/protobuf/blob/main/LICENSE)
|
||||
* [plist](https://howett.net/plist) available under [license](https://github.com/DHowett/go-plist/blob/main/LICENSE)
|
||||
* [atlas](https://ariga.io/atlas)
|
||||
* [ent](https://entgo.io/ent)
|
||||
* [bcrypt](https://github.com/ProtonMail/bcrypt) available under [license](https://github.com/ProtonMail/bcrypt/blob/master/LICENSE)
|
||||
* [go-crypto](https://github.com/ProtonMail/go-crypto) available under [license](https://github.com/ProtonMail/go-crypto/blob/master/LICENSE)
|
||||
* [go-mime](https://github.com/ProtonMail/go-mime) available under [license](https://github.com/ProtonMail/go-mime/blob/master/LICENSE)
|
||||
* [go-srp](https://github.com/ProtonMail/go-srp) available under [license](https://github.com/ProtonMail/go-srp/blob/master/LICENSE)
|
||||
* [readline](https://github.com/abiosoft/readline) available under [license](https://github.com/abiosoft/readline/blob/master/LICENSE)
|
||||
* [levenshtein](https://github.com/agext/levenshtein) available under [license](https://github.com/agext/levenshtein/blob/master/LICENSE)
|
||||
* [cascadia](https://github.com/andybalholm/cascadia) available under [license](https://github.com/andybalholm/cascadia/blob/master/LICENSE)
|
||||
* [go-textseg](https://github.com/apparentlymart/go-textseg/v13) available under [license](https://github.com/apparentlymart/go-textseg/v13/blob/master/LICENSE)
|
||||
* [sonic](https://github.com/bytedance/sonic) available under [license](https://github.com/bytedance/sonic/blob/master/LICENSE)
|
||||
* [base64x](https://github.com/chenzhuoyu/base64x) available under [license](https://github.com/chenzhuoyu/base64x/blob/master/LICENSE)
|
||||
* [test](https://github.com/chzyer/test) available under [license](https://github.com/chzyer/test/blob/master/LICENSE)
|
||||
|
@ -93,7 +89,6 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||
* [mimetype](https://github.com/gabriel-vasile/mimetype) available under [license](https://github.com/gabriel-vasile/mimetype/blob/master/LICENSE)
|
||||
* [sse](https://github.com/gin-contrib/sse) available under [license](https://github.com/gin-contrib/sse/blob/master/LICENSE)
|
||||
* [gin](https://github.com/gin-gonic/gin) available under [license](https://github.com/gin-gonic/gin/blob/master/LICENSE)
|
||||
* [inflect](https://github.com/go-openapi/inflect) available under [license](https://github.com/go-openapi/inflect/blob/master/LICENSE)
|
||||
* [locales](https://github.com/go-playground/locales) available under [license](https://github.com/go-playground/locales/blob/master/LICENSE)
|
||||
* [universal-translator](https://github.com/go-playground/universal-translator) available under [license](https://github.com/go-playground/universal-translator/blob/master/LICENSE)
|
||||
* [validator](https://github.com/go-playground/validator/v10) available under [license](https://github.com/go-playground/validator/v10/blob/master/LICENSE)
|
||||
|
@ -105,7 +100,6 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||
* [go-immutable-radix](https://github.com/hashicorp/go-immutable-radix) available under [license](https://github.com/hashicorp/go-immutable-radix/blob/master/LICENSE)
|
||||
* [go-memdb](https://github.com/hashicorp/go-memdb) available under [license](https://github.com/hashicorp/go-memdb/blob/master/LICENSE)
|
||||
* [golang-lru](https://github.com/hashicorp/golang-lru) available under [license](https://github.com/hashicorp/golang-lru/blob/master/LICENSE)
|
||||
* [hcl](https://github.com/hashicorp/hcl/v2) available under [license](https://github.com/hashicorp/hcl/v2/blob/master/LICENSE)
|
||||
* [multierror](https://github.com/joeshaw/multierror) available under [license](https://github.com/joeshaw/multierror/blob/master/LICENSE)
|
||||
* [go](https://github.com/json-iterator/go) available under [license](https://github.com/json-iterator/go/blob/master/LICENSE)
|
||||
* [cpuid](https://github.com/klauspost/cpuid/v2) available under [license](https://github.com/klauspost/cpuid/v2/blob/master/LICENSE)
|
||||
|
@ -114,7 +108,6 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||
* [go-isatty](https://github.com/mattn/go-isatty) available under [license](https://github.com/mattn/go-isatty/blob/master/LICENSE)
|
||||
* [go-runewidth](https://github.com/mattn/go-runewidth) available under [license](https://github.com/mattn/go-runewidth/blob/master/LICENSE)
|
||||
* [go-sqlite3](https://github.com/mattn/go-sqlite3) available under [license](https://github.com/mattn/go-sqlite3/blob/master/LICENSE)
|
||||
* [go-wordwrap](https://github.com/mitchellh/go-wordwrap) available under [license](https://github.com/mitchellh/go-wordwrap/blob/master/LICENSE)
|
||||
* [concurrent](https://github.com/modern-go/concurrent) available under [license](https://github.com/modern-go/concurrent/blob/master/LICENSE)
|
||||
* [reflect2](https://github.com/modern-go/reflect2) available under [license](https://github.com/modern-go/reflect2/blob/master/LICENSE)
|
||||
* [tablewriter](https://github.com/olekukonko/tablewriter) available under [license](https://github.com/olekukonko/tablewriter/blob/master/LICENSE)
|
||||
|
@ -130,14 +123,13 @@ Proton Mail Bridge includes the following 3rd party software:
|
|||
* [codec](https://github.com/ugorji/go/codec) available under [license](https://github.com/ugorji/go/codec/blob/master/LICENSE)
|
||||
* [tagparser](https://github.com/vmihailenco/tagparser/v2) available under [license](https://github.com/vmihailenco/tagparser/v2/blob/master/LICENSE)
|
||||
* [smetrics](https://github.com/xrash/smetrics) available under [license](https://github.com/xrash/smetrics/blob/master/LICENSE)
|
||||
* [go-cty](https://github.com/zclconf/go-cty) available under [license](https://github.com/zclconf/go-cty/blob/master/LICENSE)
|
||||
* [arch](https://golang.org/x/arch) available under [license](https://cs.opensource.google/go/x/arch/+/master:LICENSE)
|
||||
* [crypto](https://golang.org/x/crypto) available under [license](https://cs.opensource.google/go/x/crypto/+/master:LICENSE)
|
||||
* [mod](https://golang.org/x/mod) available under [license](https://cs.opensource.google/go/x/mod/+/master:LICENSE)
|
||||
* [sync](https://golang.org/x/sync) available under [license](https://cs.opensource.google/go/x/sync/+/master:LICENSE)
|
||||
* [tools](https://golang.org/x/tools) available under [license](https://cs.opensource.google/go/x/tools/+/master:LICENSE)
|
||||
* [genproto](https://google.golang.org/genproto)
|
||||
gopkg.in/yaml.v3
|
||||
* [genproto](https://google.golang.org/genproto) available under [license](https://pkg.go.dev/google.golang.org/genproto?tab=licenses)
|
||||
* [yaml](https://gopkg.in/yaml.v3) available under [license](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE)
|
||||
* [docker-credential-helpers](https://github.com/ProtonMail/docker-credential-helpers) available under [license](https://github.com/ProtonMail/docker-credential-helpers/blob/master/LICENSE)
|
||||
* [go-message](https://github.com/ProtonMail/go-message) available under [license](https://github.com/ProtonMail/go-message/blob/master/LICENSE)
|
||||
* [go-keychain](https://github.com/cuthix/go-keychain) available under [license](https://github.com/cuthix/go-keychain/blob/master/LICENSE)
|
||||
|
|
50
Changelog.md
50
Changelog.md
|
@ -3,6 +3,56 @@
|
|||
Changelog [format](http://keepachangelog.com/en/1.0.0/)
|
||||
|
||||
|
||||
## [Bridge 3.4.0] Trift changelog
|
||||
|
||||
### Changed
|
||||
* Test: Add require.Eventually to TestBridge_UserAgentFromSMTPClient.
|
||||
* Test: Add smtp-send utility.
|
||||
* GODT-2759: Check for oprhan messages.
|
||||
* GODT-2759: Add prompt to download missing messages for analysis.
|
||||
* GODT-2759: CLI debug commands.
|
||||
* Remove gRPC auto-generated C++ source files.
|
||||
* GODT-2749: Manual test-windows again.
|
||||
* Test: Force all unit test to use minimum sync spec.
|
||||
* Test: Force sync limits to minimum with env variable.
|
||||
* GODT-2749: Manual windows-test.
|
||||
* GODT-2691: Close logrus output file on exit.
|
||||
* GODT-2522: New Gluon database layout.
|
||||
* GODT-2678: When internet is off, do not display status dot icon for the user in the context menu.
|
||||
* GODT-2686: Change the orientation of the expand/collapse arrow for Advanced settings.
|
||||
* Test(GODT-2636): Add step for sending from EML.
|
||||
* Log failed message ids during sync.
|
||||
* GODT-2510: Remove Ent.
|
||||
* Test(GODT-2600): Changing state (read/unread, starred/unstarred) of a message in integration tests.
|
||||
* GODT-2703: Got rid of account details dialog with Apple Mail autoconf.
|
||||
* GODT-2685: Update to bug report log attachment logic.
|
||||
* GODT-2690: Update sentry reporting in GUI for new log file naming.
|
||||
* GODT-2668: Implemented new log retention policy.
|
||||
* Test(GODT-2683): Save Draft without "Date" & "From" in headers.
|
||||
* GODT-2666: Feat(GODT-2667): introduce sessionID in bridge.
|
||||
* GODT-2653: Log API error details on Message import and send.
|
||||
* GODT-2674: Add more logs to failed update.
|
||||
* GODT-2660: Calculate bridge coverage and refactor CI yaml file.
|
||||
* Fix dependency_license script to handle dot formated version.
|
||||
* Add error logs when messages fail to build during sync.
|
||||
* GODT-2673: Use NoClient as UserAgent without any client connected and...
|
||||
* GODT-2655: Display internal build time tag in log and GUI.
|
||||
|
||||
### Fixed
|
||||
* GODT-2758: Fix panic in SetFlagsOnMessages.
|
||||
* GODT-2578: Refresh literals appended to Sent folder.
|
||||
* GODT-2753: Vault test now check that value auto-assigned is first available port.
|
||||
* GODT-2522: Handle migration with unreferenced db values.
|
||||
* GODT-2693: Allow missing whitespace after header field colon.
|
||||
* GODT-2653: Only log when err is not nil.
|
||||
* GODT-2680: Fix for C++ debugger not working on ARM64 because of OpenSSL 3.1.
|
||||
* GODT-2675: Update GPA to applye togin-gonic/gin patch + update COPYING_NOTES.
|
||||
* GODT-2672: Fix context cancelled when IMAP/SMTP parameters change is in progress.
|
||||
* GODT-2763: Missing Answered flag on Sync and Message Create.
|
||||
* GODT-2774: Only check telemetry availability for the current user.
|
||||
* GODT-2774: Add external context to telemetry tasks.
|
||||
|
||||
|
||||
## Stone Bridge 3.3.2
|
||||
|
||||
### Fixed
|
||||
|
|
20
Makefile
20
Makefile
|
@ -11,7 +11,7 @@ ROOT_DIR:=$(shell dirname $(realpath $(firstword $(MAKEFILE_LIST))))
|
|||
.PHONY: build build-gui build-nogui build-launcher versioner hasher
|
||||
|
||||
# Keep version hardcoded so app build works also without Git repository.
|
||||
BRIDGE_APP_VERSION?=3.3.2+git
|
||||
BRIDGE_APP_VERSION?=3.4.0+git
|
||||
APP_VERSION:=${BRIDGE_APP_VERSION}
|
||||
APP_FULL_NAME:=Proton Mail Bridge
|
||||
APP_VENDOR:=Proton AG
|
||||
|
@ -229,14 +229,28 @@ add-license:
|
|||
change-copyright-year:
|
||||
./utils/missing_license.sh change-year
|
||||
|
||||
GOCOVERAGE=-covermode=count -coverpkg=github.com/ProtonMail/proton-bridge/v3/internal/...,github.com/ProtonMail/proton-bridge/v3/pkg/...,
|
||||
GOCOVERDIR=-args -test.gocoverdir=$$PWD/coverage
|
||||
|
||||
test: gofiles
|
||||
go test -v -timeout=20m -p=1 -count=1 -coverprofile=/tmp/coverage.out -run=${TESTRUN} ./internal/... ./pkg/...
|
||||
mkdir -p coverage/unit-${GOOS}
|
||||
go test \
|
||||
-v -timeout=20m -p=1 -count=1 \
|
||||
${GOCOVERAGE} \
|
||||
-run=${TESTRUN} ./internal/... ./pkg/... \
|
||||
${GOCOVERDIR}/unit-${GOOS}
|
||||
|
||||
test-race: gofiles
|
||||
go test -v -timeout=40m -p=1 -count=1 -race -failfast -run=${TESTRUN} ./internal/... ./pkg/...
|
||||
|
||||
test-integration: gofiles
|
||||
go test -v -timeout=60m -p=1 -count=1 github.com/ProtonMail/proton-bridge/v3/tests
|
||||
mkdir -p coverage/integration
|
||||
go test \
|
||||
-v -timeout=60m -p=1 -count=1 \
|
||||
${GOCOVERAGE} \
|
||||
github.com/ProtonMail/proton-bridge/v3/tests \
|
||||
${GOCOVERDIR}/integration
|
||||
|
||||
|
||||
test-integration-debug: gofiles
|
||||
dlv test github.com/ProtonMail/proton-bridge/v3/tests -- -test.v -test.timeout=10m -test.parallel=1 -test.count=1
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
@ -43,9 +44,10 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
appName = "Proton Mail Launcher"
|
||||
exeName = "bridge"
|
||||
guiName = "bridge-gui"
|
||||
appName = "Proton Mail Launcher"
|
||||
exeName = "bridge"
|
||||
guiName = "bridge-gui"
|
||||
launcherName = "launcher"
|
||||
|
||||
FlagCLI = "cli"
|
||||
FlagCLIShort = "c"
|
||||
|
@ -53,6 +55,7 @@ const (
|
|||
FlagNonInteractiveShort = "n"
|
||||
FlagLauncher = "--launcher"
|
||||
FlagWait = "--wait"
|
||||
FlagSessionID = "--session-id"
|
||||
)
|
||||
|
||||
func main() { //nolint:funlen
|
||||
|
@ -75,12 +78,26 @@ func main() { //nolint:funlen
|
|||
if err != nil {
|
||||
l.WithError(err).Fatal("Failed to get logs path")
|
||||
}
|
||||
crashHandler.AddRecoveryAction(logging.DumpStackTrace(logsPath))
|
||||
|
||||
if err := logging.Init(logsPath, os.Getenv("VERBOSITY")); err != nil {
|
||||
sessionID := logging.NewSessionID()
|
||||
crashHandler.AddRecoveryAction(logging.DumpStackTrace(logsPath, sessionID, launcherName))
|
||||
|
||||
var closer io.Closer
|
||||
if closer, err = logging.Init(
|
||||
logsPath,
|
||||
sessionID,
|
||||
logging.LauncherShortAppName,
|
||||
logging.DefaultMaxLogFileSize,
|
||||
logging.NoPruning,
|
||||
os.Getenv("VERBOSITY"),
|
||||
); err != nil {
|
||||
l.WithError(err).Fatal("Failed to setup logging")
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = logging.Close(closer)
|
||||
}()
|
||||
|
||||
updatesPath, err := locations.ProvideUpdatesPath()
|
||||
if err != nil {
|
||||
l.WithError(err).Fatal("Failed to get updates path")
|
||||
|
@ -134,7 +151,7 @@ func main() { //nolint:funlen
|
|||
}
|
||||
}
|
||||
|
||||
cmd := execabs.Command(exe, appendLauncherPath(launcher, args)...) //nolint:gosec
|
||||
cmd := execabs.Command(exe, appendLauncherPath(launcher, append(args, FlagSessionID, string(sessionID)))...) //nolint:gosec
|
||||
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
|
|
14
go.mod
14
go.mod
|
@ -5,9 +5,9 @@ go 1.20
|
|||
require (
|
||||
github.com/0xAX/notificator v0.0.0-20220220101646-ee9b8921e557
|
||||
github.com/Masterminds/semver/v3 v3.2.0
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230706112359-3146d8312d12
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230706110757-a9327fb18611
|
||||
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a
|
||||
github.com/ProtonMail/go-proton-api v0.4.1-0.20230628092916-81cb3f87f184
|
||||
github.com/ProtonMail/go-proton-api v0.4.1-0.20230704060229-a77a437ec052
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.1-proton
|
||||
github.com/PuerkitoBio/goquery v1.8.1
|
||||
github.com/abiosoft/ishell v2.0.0+incompatible
|
||||
|
@ -51,16 +51,12 @@ require (
|
|||
)
|
||||
|
||||
require (
|
||||
ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb // indirect
|
||||
entgo.io/ent v0.11.8 // indirect
|
||||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230518184743-7afd39499903 // indirect
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f // indirect
|
||||
github.com/ProtonMail/go-srp v0.0.7 // indirect
|
||||
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db // indirect
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect
|
||||
github.com/bytedance/sonic v1.9.1 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/chzyer/test v1.0.0 // indirect
|
||||
|
@ -78,7 +74,6 @@ require (
|
|||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/gin-gonic/gin v1.9.1 // indirect
|
||||
github.com/go-openapi/inflect v0.19.0 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.0 // indirect
|
||||
|
@ -90,7 +85,6 @@ require (
|
|||
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
|
||||
github.com/hashicorp/go-memdb v1.3.3 // indirect
|
||||
github.com/hashicorp/golang-lru v0.5.4 // indirect
|
||||
github.com/hashicorp/hcl/v2 v2.16.1 // indirect
|
||||
github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.4 // indirect
|
||||
|
@ -98,8 +92,7 @@ require (
|
|||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.19 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.17 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
|
@ -115,7 +108,6 @@ require (
|
|||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/zclconf/go-cty v1.12.1 // indirect
|
||||
golang.org/x/arch v0.3.0 // indirect
|
||||
golang.org/x/crypto v0.9.0 // indirect
|
||||
golang.org/x/mod v0.8.0 // indirect
|
||||
|
|
42
go.sum
42
go.sum
|
@ -1,5 +1,3 @@
|
|||
ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb h1:mbsFtavDqGdYwdDpP50LGOOZ2hgyGoJcZeOpbgKMyu4=
|
||||
ariga.io/atlas v0.9.1-0.20230119145809-92243f7c55cb/go.mod h1:T230JFcENj4ZZzMkZrXFDSkv+2kXkUgpJ5FQQ5hMcKU=
|
||||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
|
@ -13,13 +11,10 @@ cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqCl
|
|||
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
|
||||
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
entgo.io/ent v0.11.8 h1:M/M0QL1CYCUSdqGRXUrXhFYSDRJPsOOrr+RLEej/gyQ=
|
||||
entgo.io/ent v0.11.8/go.mod h1:ericBi6Q8l3wBH1wEIDfKxw7rcQEuRPyBfbIzjtxJ18=
|
||||
github.com/0xAX/notificator v0.0.0-20220220101646-ee9b8921e557 h1:l6surSnJ3RP4qA1qmKJ+hQn3UjytosdoG27WGjrDlVs=
|
||||
github.com/0xAX/notificator v0.0.0-20220220101646-ee9b8921e557/go.mod h1:sTrmvD/TxuypdOERsDOS7SndZg0rzzcCi1b6wQMXUYM=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||
github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g=
|
||||
github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
|
@ -28,10 +23,8 @@ github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf h1:yc9daCCYUefEs
|
|||
github.com/ProtonMail/bcrypt v0.0.0-20211005172633-e235017c1baf/go.mod h1:o0ESU9p83twszAU8LBeJKFAAMX14tISa0yk4Oo5TOqo=
|
||||
github.com/ProtonMail/docker-credential-helpers v1.1.0 h1:+kvUIpwWcbtP3WFv5sSvkFn/XLzSqPOB5AAthuk9xPk=
|
||||
github.com/ProtonMail/docker-credential-helpers v1.1.0/go.mod h1:mK0aBveCxhnQ756AmaTfXMZDeULvheYVhF/MWMErN5g=
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230607122549-dbdb8e1cc0c3 h1:VMbbJD3dcGPPIgbdQTS5Z4nX0QU/SsVZWdmsMVVBBsI=
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230607122549-dbdb8e1cc0c3/go.mod h1:ERZikuN+2i/oTeSwS5fq7J0Fms76uUcBlTAwT4KaEAk=
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230706112359-3146d8312d12 h1:a4mVvmGGojclWgbQ6g4eW/XquioHJ/iYF4OFk70265Q=
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230706112359-3146d8312d12/go.mod h1:ERZikuN+2i/oTeSwS5fq7J0Fms76uUcBlTAwT4KaEAk=
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230706110757-a9327fb18611 h1:QVydPr/+pgz5xihc2ujNNV+qnq3oTidIXvF0PgkcY6U=
|
||||
github.com/ProtonMail/gluon v0.16.1-0.20230706110757-a9327fb18611/go.mod h1:Og5/Dz1MiGpCJn51XujZwxiLG7WzvvjE5PRpZBQmAHo=
|
||||
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a h1:D+aZah+k14Gn6kmL7eKxoo/4Dr/lK3ChBcwce2+SQP4=
|
||||
github.com/ProtonMail/go-autostart v0.0.0-20210130080809-00ed301c8e9a/go.mod h1:oTGdE7/DlWIr23G0IKW3OXK9wZ5Hw1GGiaJFccTvZi4=
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20230321155629-9a39f2531310/go.mod h1:8TI4H3IbrackdNgv+92dI+rhpCaLqM0IfpgCgenFvRE=
|
||||
|
@ -42,8 +35,8 @@ github.com/ProtonMail/go-message v0.13.1-0.20230526094639-b62c999c85b7 h1:+j+Kd/
|
|||
github.com/ProtonMail/go-message v0.13.1-0.20230526094639-b62c999c85b7/go.mod h1:NBAn21zgCJ/52WLDyed18YvYFm5tEoeDauubFqLokM4=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f h1:tCbYj7/299ekTTXpdwKYF8eBlsYsDVoggDAuAjoK66k=
|
||||
github.com/ProtonMail/go-mime v0.0.0-20230322103455-7d82a3887f2f/go.mod h1:gcr0kNtGBqin9zDW9GOHcVntrwnjrK+qdJ06mWYBybw=
|
||||
github.com/ProtonMail/go-proton-api v0.4.1-0.20230628092916-81cb3f87f184 h1:gw8sgQMCIDS/lw5xbF2iqlTfvY0HhuafjlGsKcN3VsE=
|
||||
github.com/ProtonMail/go-proton-api v0.4.1-0.20230628092916-81cb3f87f184/go.mod h1:+aTJoYu8bqzGECXL2DOdiZTZ64bGn3w0NC8VcFpJrFM=
|
||||
github.com/ProtonMail/go-proton-api v0.4.1-0.20230704060229-a77a437ec052 h1:uIq0RX4gU9PSZ9x5b2LmJUXNOuBXRRVSOkM1RGnSy68=
|
||||
github.com/ProtonMail/go-proton-api v0.4.1-0.20230704060229-a77a437ec052/go.mod h1:+aTJoYu8bqzGECXL2DOdiZTZ64bGn3w0NC8VcFpJrFM=
|
||||
github.com/ProtonMail/go-srp v0.0.7 h1:Sos3Qk+th4tQR64vsxGIxYpN3rdnG9Wf9K4ZloC1JrI=
|
||||
github.com/ProtonMail/go-srp v0.0.7/go.mod h1:giCp+7qRnMIcCvI6V6U3S1lDDXDQYx2ewJ6F/9wdlJk=
|
||||
github.com/ProtonMail/gopenpgp/v2 v2.7.1-proton h1:YS6M20yvjCJPR1r4ADW5TPn6rahs4iAyZaACei86bEc=
|
||||
|
@ -54,8 +47,6 @@ github.com/abiosoft/ishell v2.0.0+incompatible h1:zpwIuEHc37EzrsIYah3cpevrIc8Oma
|
|||
github.com/abiosoft/ishell v2.0.0+incompatible/go.mod h1:HQR9AqF2R3P4XXpMpI0NAzgHf/aS6+zVXRj14cVk9qg=
|
||||
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db h1:CjPUSXOiYptLbTdr1RceuZgSFDQ7U15ITERUGrUORx8=
|
||||
github.com/abiosoft/readline v0.0.0-20180607040430-155bce2042db/go.mod h1:rB3B4rKii8V21ydCbIzH5hZiCQE7f5E9SzUb/ZZx530=
|
||||
github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
|
||||
github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/allan-simon/go-singleinstance v0.0.0-20210120080615-d0997106ab37 h1:28uU3TtuvQ6KRndxg9TrC868jBWmSKgh0GTXkACCXmA=
|
||||
|
@ -63,9 +54,6 @@ github.com/allan-simon/go-singleinstance v0.0.0-20210120080615-d0997106ab37/go.m
|
|||
github.com/andybalholm/cascadia v1.3.1/go.mod h1:R4bJ1UQfqADjvDa4P6HZHLh/3OxWWEqc0Sk8XGwHqvA=
|
||||
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
|
||||
github.com/andybalholm/cascadia v1.3.2/go.mod h1:7gtRlve5FxPPgIgX36uWBX58OdBsSS6lUvCFb+h7KvU=
|
||||
github.com/apparentlymart/go-dump v0.0.0-20180507223929-23540a00eaa3/go.mod h1:oL81AME2rN47vu18xqj1S1jPIPuN7afo62yKTNn3XMM=
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw=
|
||||
github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
|
@ -160,8 +148,6 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
|
|||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4=
|
||||
github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
|
@ -172,8 +158,6 @@ github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QX
|
|||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
|
||||
github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/godbus/dbus v4.1.0+incompatible h1:WqqLRTsQic3apZUK9qC5sGNfXthmPXzUZ7nQPrNITa4=
|
||||
|
@ -250,8 +234,6 @@ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
|
|||
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
|
||||
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/hashicorp/hcl/v2 v2.16.1 h1:BwuxEMD/tsYgbhIW7UuI3crjovf3MzuFWiVgiv57iHg=
|
||||
github.com/hashicorp/hcl/v2 v2.16.1/go.mod h1:JRmR89jycNkrrqnMmvPDMd56n1rQJ2Q6KocSLCMCXng=
|
||||
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
|
||||
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
|
||||
github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
|
||||
|
@ -284,9 +266,6 @@ github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
|||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348 h1:MtvEpTB6LX3vkb4ax0b5D2DHbNAUsen0Gx5wZoq3lV4=
|
||||
github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
|
@ -303,8 +282,8 @@ github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
|
|||
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM=
|
||||
github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
|
@ -313,8 +292,6 @@ github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceT
|
|||
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
|
||||
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0=
|
||||
github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0=
|
||||
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
|
||||
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
|
||||
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
|
@ -369,8 +346,6 @@ github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf
|
|||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.9.2 h1:oxx1eChJGI6Uks2ZC4W1zpLlVgqB8ner4EuQwV4Ik1Y=
|
||||
|
@ -415,10 +390,8 @@ github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4d
|
|||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/urfave/cli/v2 v2.24.4 h1:0gyJJEBYtCV87zI/x2nZCPyDxD51K6xM8SkwjHFCNEU=
|
||||
github.com/urfave/cli/v2 v2.24.4/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc=
|
||||
github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4=
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.5 h1:5gO0H1iULLWGhs2H5tbAHIZTV8/cYafcFOr9znI5mJU=
|
||||
github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc=
|
||||
github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0 h1:y09buUbR+b5aycVFQs/g70pqKVZNBmxwAhO7/IwNM9g=
|
||||
github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
|
@ -426,9 +399,6 @@ github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRT
|
|||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY=
|
||||
github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA=
|
||||
github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
|
|
|
@ -19,6 +19,7 @@ package app
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"net/http/cookiejar"
|
||||
"net/url"
|
||||
|
@ -76,10 +77,12 @@ const (
|
|||
flagNoWindow = "no-window"
|
||||
flagParentPID = "parent-pid"
|
||||
flagSoftwareRenderer = "software-renderer"
|
||||
flagSessionID = "session-id"
|
||||
)
|
||||
|
||||
const (
|
||||
appUsage = "Proton Mail IMAP and SMTP Bridge"
|
||||
appUsage = "Proton Mail IMAP and SMTP Bridge"
|
||||
appShortName = "bridge"
|
||||
)
|
||||
|
||||
func New() *cli.App {
|
||||
|
@ -150,6 +153,10 @@ func New() *cli.App {
|
|||
Hidden: true,
|
||||
Value: false,
|
||||
},
|
||||
&cli.StringFlag{
|
||||
Name: flagSessionID,
|
||||
Hidden: true,
|
||||
},
|
||||
}
|
||||
|
||||
app.Action = run
|
||||
|
@ -183,6 +190,11 @@ func run(c *cli.Context) error {
|
|||
exe = os.Args[0]
|
||||
}
|
||||
|
||||
var logCloser io.Closer
|
||||
defer func() {
|
||||
_ = logging.Close(logCloser)
|
||||
}()
|
||||
|
||||
// Restart the app if requested.
|
||||
return withRestarter(exe, func(restarter *restarter.Restarter) error {
|
||||
// Handle crashes with various actions.
|
||||
|
@ -199,7 +211,9 @@ func run(c *cli.Context) error {
|
|||
}
|
||||
|
||||
// Initialize logging.
|
||||
return withLogging(c, crashHandler, locations, func() error {
|
||||
return withLogging(c, crashHandler, locations, func(closer io.Closer) error {
|
||||
logCloser = closer
|
||||
|
||||
// If there was an error during migration, log it now.
|
||||
if migrationErr != nil {
|
||||
logrus.WithError(migrationErr).Error("Failed to migrate old app data")
|
||||
|
@ -298,7 +312,7 @@ func withSingleInstance(settingPath, lockFile string, version *semver.Version, f
|
|||
}
|
||||
|
||||
// Initialize our logging system.
|
||||
func withLogging(c *cli.Context, crashHandler *crash.Handler, locations *locations.Locations, fn func() error) error {
|
||||
func withLogging(c *cli.Context, crashHandler *crash.Handler, locations *locations.Locations, fn func(closer io.Closer) error) error {
|
||||
logrus.Debug("Initializing logging")
|
||||
defer logrus.Debug("Logging stopped")
|
||||
|
||||
|
@ -311,12 +325,21 @@ func withLogging(c *cli.Context, crashHandler *crash.Handler, locations *locatio
|
|||
logrus.WithField("path", logsPath).Debug("Received logs path")
|
||||
|
||||
// Initialize logging.
|
||||
if err := logging.Init(logsPath, c.String(flagLogLevel)); err != nil {
|
||||
sessionID := logging.NewSessionIDFromString(c.String(flagSessionID))
|
||||
var closer io.Closer
|
||||
if closer, err = logging.Init(
|
||||
logsPath,
|
||||
sessionID,
|
||||
logging.BridgeShortAppName,
|
||||
logging.DefaultMaxLogFileSize,
|
||||
logging.DefaultPruningSize,
|
||||
c.String(flagLogLevel),
|
||||
); err != nil {
|
||||
return fmt.Errorf("could not initialize logging: %w", err)
|
||||
}
|
||||
|
||||
// Ensure we dump a stack trace if we crash.
|
||||
crashHandler.AddRecoveryAction(logging.DumpStackTrace(logsPath))
|
||||
crashHandler.AddRecoveryAction(logging.DumpStackTrace(logsPath, sessionID, appShortName))
|
||||
|
||||
logrus.
|
||||
WithField("appName", constants.FullAppName).
|
||||
|
@ -329,7 +352,7 @@ func withLogging(c *cli.Context, crashHandler *crash.Handler, locations *locatio
|
|||
WithField("SentryID", sentry.GetProtectedHostname()).
|
||||
Info("Run app")
|
||||
|
||||
return fn()
|
||||
return fn(closer)
|
||||
}
|
||||
|
||||
// WithLocations provides access to locations where we store our files.
|
||||
|
|
|
@ -44,7 +44,7 @@ import (
|
|||
// deleteOldGoIMAPFiles Set with `-ldflags -X app.deleteOldGoIMAPFiles=true` to enable cleanup of old imap cache data.
|
||||
var deleteOldGoIMAPFiles bool //nolint:gochecknoglobals
|
||||
|
||||
// withBridge creates creates and tears down the bridge.
|
||||
// withBridge creates and tears down the bridge.
|
||||
func withBridge(
|
||||
c *cli.Context,
|
||||
exe string,
|
||||
|
|
|
@ -25,6 +25,7 @@ import (
|
|||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -300,8 +301,11 @@ func TestBridge_UserAgentFromSMTPClient(t *testing.T) {
|
|||
string(info.BridgePass)),
|
||||
))
|
||||
|
||||
currentUserAgent = b.GetCurrentUserAgent()
|
||||
require.Contains(t, currentUserAgent, "UnknownClient/0.0.1")
|
||||
require.Eventually(t, func() bool {
|
||||
currentUserAgent = b.GetCurrentUserAgent()
|
||||
|
||||
return strings.Contains(currentUserAgent, "UnknownClient/0.0.1")
|
||||
}, time.Minute, 5*time.Second)
|
||||
})
|
||||
})
|
||||
}
|
||||
|
|
|
@ -18,13 +18,8 @@
|
|||
package bridge
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
|
||||
"github.com/ProtonMail/go-proton-api"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
|
@ -34,8 +29,8 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
MaxTotalAttachmentSize = 7 * (1 << 20)
|
||||
MaxCompressedFilesCount = 6
|
||||
DefaultMaxBugReportZipSize = 7 * 1024 * 1024
|
||||
DefaultMaxSessionCountForBugReport = 10
|
||||
)
|
||||
|
||||
func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, description, username, email, client string, attachLogs bool) error {
|
||||
|
@ -51,54 +46,25 @@ func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, descript
|
|||
}
|
||||
}
|
||||
|
||||
var atts []proton.ReportBugAttachment
|
||||
var attachment []proton.ReportBugAttachment
|
||||
|
||||
if attachLogs {
|
||||
logs, err := getMatchingLogs(bridge.locator, func(filename string) bool {
|
||||
return logging.MatchLogName(filename) && !logging.MatchStackTraceName(filename)
|
||||
})
|
||||
logsPath, err := bridge.locator.ProvideLogsPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
crashes, err := getMatchingLogs(bridge.locator, func(filename string) bool {
|
||||
return logging.MatchLogName(filename) && logging.MatchStackTraceName(filename)
|
||||
})
|
||||
buffer, err := logging.ZipLogsForBugReport(logsPath, DefaultMaxSessionCountForBugReport, DefaultMaxBugReportZipSize)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
guiLogs, err := getMatchingLogs(bridge.locator, func(filename string) bool {
|
||||
return logging.MatchGUILogName(filename) && !logging.MatchStackTraceName(filename)
|
||||
})
|
||||
body, err := io.ReadAll(buffer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var matchFiles []string
|
||||
|
||||
// Include bridge logs, up to a maximum amount.
|
||||
matchFiles = append(matchFiles, logs[max(0, len(logs)-(MaxCompressedFilesCount/2)):]...)
|
||||
|
||||
// Include crash logs, up to a maximum amount.
|
||||
matchFiles = append(matchFiles, crashes[max(0, len(crashes)-(MaxCompressedFilesCount/2)):]...)
|
||||
|
||||
// bridge-gui keeps just one small (~ 1kb) log file; we always include it.
|
||||
if len(guiLogs) > 0 {
|
||||
matchFiles = append(matchFiles, guiLogs[len(guiLogs)-1])
|
||||
}
|
||||
|
||||
archive, err := zipFiles(matchFiles)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(archive)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
atts = append(atts, proton.ReportBugAttachment{
|
||||
attachment = append(attachment, proton.ReportBugAttachment{
|
||||
Name: "logs.zip",
|
||||
Filename: "logs.zip",
|
||||
MIMEType: "application/zip",
|
||||
|
@ -125,116 +91,5 @@ func (bridge *Bridge) ReportBug(ctx context.Context, osType, osVersion, descript
|
|||
|
||||
Username: account,
|
||||
Email: email,
|
||||
}, atts...)
|
||||
}
|
||||
|
||||
func max(a, b int) int {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func getMatchingLogs(locator Locator, filenameMatchFunc func(string) bool) (filenames []string, err error) {
|
||||
logsPath, err := locator.ProvideLogsPath()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
files, err := os.ReadDir(logsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var matchFiles []string
|
||||
|
||||
for _, file := range files {
|
||||
if filenameMatchFunc(file.Name()) {
|
||||
matchFiles = append(matchFiles, filepath.Join(logsPath, file.Name()))
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(matchFiles) // Sorted by timestamp: oldest first.
|
||||
|
||||
return matchFiles, nil
|
||||
}
|
||||
|
||||
type limitedBuffer struct {
|
||||
capacity int
|
||||
buf *bytes.Buffer
|
||||
}
|
||||
|
||||
func newLimitedBuffer(capacity int) *limitedBuffer {
|
||||
return &limitedBuffer{
|
||||
capacity: capacity,
|
||||
buf: bytes.NewBuffer(make([]byte, 0, capacity)),
|
||||
}
|
||||
}
|
||||
|
||||
func (b *limitedBuffer) Write(p []byte) (n int, err error) {
|
||||
if len(p)+b.buf.Len() > b.capacity {
|
||||
return 0, ErrSizeTooLarge
|
||||
}
|
||||
|
||||
return b.buf.Write(p)
|
||||
}
|
||||
|
||||
func (b *limitedBuffer) Read(p []byte) (n int, err error) {
|
||||
return b.buf.Read(p)
|
||||
}
|
||||
|
||||
func zipFiles(filenames []string) (io.Reader, error) {
|
||||
if len(filenames) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
buf := newLimitedBuffer(MaxTotalAttachmentSize)
|
||||
|
||||
w := zip.NewWriter(buf)
|
||||
defer w.Close() //nolint:errcheck
|
||||
|
||||
for _, file := range filenames {
|
||||
if err := addFileToZip(file, w); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buf, nil
|
||||
}
|
||||
|
||||
func addFileToZip(filename string, writer *zip.Writer) error {
|
||||
fileReader, err := os.Open(filepath.Clean(filename))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fileReader.Close() //nolint:errcheck,gosec
|
||||
|
||||
fileInfo, err := fileReader.Stat()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header, err := zip.FileInfoHeader(fileInfo)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header.Method = zip.Deflate
|
||||
header.Name = filepath.Base(filename)
|
||||
|
||||
fileWriter, err := writer.CreateHeader(header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(fileWriter, fileReader); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return fileReader.Close()
|
||||
}, attachment...)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,297 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package bridge
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ProtonMail/gluon/imap"
|
||||
"github.com/ProtonMail/gluon/rfc822"
|
||||
"github.com/ProtonMail/go-proton-api"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/user"
|
||||
"github.com/bradenaw/juniper/iterator"
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
goimap "github.com/emersion/go-imap"
|
||||
goimapclient "github.com/emersion/go-imap/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
)
|
||||
|
||||
type CheckClientStateResult struct {
|
||||
MissingMessages map[string]map[string]user.DiagMailboxMessage
|
||||
}
|
||||
|
||||
func (c *CheckClientStateResult) AddMissingMessage(userID string, message user.DiagMailboxMessage) {
|
||||
v, ok := c.MissingMessages[userID]
|
||||
if !ok {
|
||||
c.MissingMessages[userID] = map[string]user.DiagMailboxMessage{message.ID: message}
|
||||
} else {
|
||||
v[message.ID] = message
|
||||
}
|
||||
}
|
||||
|
||||
// CheckClientState checks the current IMAP client reported state against the proton server state and reports
|
||||
// anything that is out of place.
|
||||
func (bridge *Bridge) CheckClientState(ctx context.Context, checkFlags bool, progressCB func(string)) (CheckClientStateResult, error) {
|
||||
bridge.usersLock.RLock()
|
||||
defer bridge.usersLock.RUnlock()
|
||||
|
||||
users := maps.Values(bridge.users)
|
||||
|
||||
result := CheckClientStateResult{
|
||||
MissingMessages: make(map[string]map[string]user.DiagMailboxMessage),
|
||||
}
|
||||
|
||||
for _, usr := range users {
|
||||
if progressCB != nil {
|
||||
progressCB(fmt.Sprintf("Checking state for user %v", usr.Name()))
|
||||
}
|
||||
log := logrus.WithField("user", usr.Name()).WithField("diag", "state-check")
|
||||
log.Debug("Retrieving all server metadata")
|
||||
meta, err := usr.GetDiagnosticMetadata(ctx)
|
||||
if err != nil {
|
||||
return result, err
|
||||
}
|
||||
|
||||
success := true
|
||||
|
||||
if len(meta.Metadata) != len(meta.MessageIDs) {
|
||||
log.Errorf("Metadata (%v) and message(%v) list sizes do not match", len(meta.Metadata), len(meta.MessageIDs))
|
||||
}
|
||||
|
||||
log.Debug("Building state")
|
||||
state, err := meta.BuildMailboxToMessageMap(usr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to build state")
|
||||
return result, err
|
||||
}
|
||||
|
||||
info, err := bridge.GetUserInfo(usr.ID())
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to get user info")
|
||||
return result, err
|
||||
}
|
||||
|
||||
addr := fmt.Sprintf("127.0.0.1:%v", bridge.GetIMAPPort())
|
||||
|
||||
for account, mboxMap := range state {
|
||||
if progressCB != nil {
|
||||
progressCB(fmt.Sprintf("Checking state for user %v's account '%v'", usr.Name(), account))
|
||||
}
|
||||
if err := func(account string, mboxMap user.AccountMailboxMap) error {
|
||||
client, err := goimapclient.Dial(addr)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to connect to imap client")
|
||||
return err
|
||||
}
|
||||
|
||||
defer func() {
|
||||
_ = client.Logout()
|
||||
}()
|
||||
|
||||
if err := client.Login(account, string(info.BridgePass)); err != nil {
|
||||
return fmt.Errorf("failed to login for user %v:%w", usr.Name(), err)
|
||||
}
|
||||
|
||||
log := log.WithField("account", account)
|
||||
for mboxName, messageList := range mboxMap {
|
||||
log := log.WithField("mbox", mboxName)
|
||||
status, err := client.Select(mboxName, true)
|
||||
if err != nil {
|
||||
log.WithError(err).Errorf("Failed to select mailbox %v", messageList)
|
||||
return fmt.Errorf("failed to select '%v':%w", mboxName, err)
|
||||
}
|
||||
|
||||
log.Debug("Checking message count")
|
||||
|
||||
if int(status.Messages) != len(messageList) {
|
||||
success = false
|
||||
log.Errorf("Message count doesn't match, got '%v' expected '%v'", status.Messages, len(messageList))
|
||||
}
|
||||
|
||||
ids, err := clientGetMessageIDs(client, mboxName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get message ids for mbox '%v': %w", mboxName, err)
|
||||
}
|
||||
|
||||
for _, msg := range messageList {
|
||||
imapFlags, ok := ids[msg.ID]
|
||||
if !ok {
|
||||
if meta.FailedMessageIDs.Contains(msg.ID) {
|
||||
log.Warningf("Missing message '%v', but it is part of failed message set", msg.ID)
|
||||
} else {
|
||||
log.Errorf("Missing message '%v'", msg.ID)
|
||||
}
|
||||
|
||||
result.AddMissingMessage(msg.UserID, msg)
|
||||
continue
|
||||
}
|
||||
|
||||
if checkFlags {
|
||||
if !imapFlags.Equals(msg.Flags) {
|
||||
log.Errorf("Message '%v' flags do mot match, got=%v, expected=%v",
|
||||
msg.ID,
|
||||
imapFlags.ToSlice(),
|
||||
msg.Flags.ToSlice(),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !success {
|
||||
log.Errorf("State does not match")
|
||||
} else {
|
||||
log.Info("State matches")
|
||||
}
|
||||
|
||||
return nil
|
||||
}(account, mboxMap); err != nil {
|
||||
return result, err
|
||||
}
|
||||
}
|
||||
|
||||
// Check for orphaned messages (only present in All Mail)
|
||||
if progressCB != nil {
|
||||
progressCB(fmt.Sprintf("Checking user %v for orphans", usr.Name()))
|
||||
}
|
||||
log.Debugf("Checking for orphans")
|
||||
|
||||
for _, m := range meta.Metadata {
|
||||
filteredLabels := xslices.Filter(m.LabelIDs, func(t string) bool {
|
||||
switch t {
|
||||
case proton.AllMailLabel:
|
||||
return false
|
||||
case proton.AllSentLabel:
|
||||
return false
|
||||
case proton.AllDraftsLabel:
|
||||
return false
|
||||
case proton.OutboxLabel:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
})
|
||||
|
||||
if len(filteredLabels) == 0 {
|
||||
log.Warnf("Message %v is only present in All Mail (Subject=%v)", m.ID, m.Subject)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func (bridge *Bridge) DebugDownloadFailedMessages(
|
||||
ctx context.Context,
|
||||
result CheckClientStateResult,
|
||||
exportPath string,
|
||||
progressCB func(string, int, int),
|
||||
) error {
|
||||
bridge.usersLock.RLock()
|
||||
defer bridge.usersLock.RUnlock()
|
||||
|
||||
for userID, messages := range result.MissingMessages {
|
||||
usr, ok := bridge.users[userID]
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to find user with id %v", userID)
|
||||
}
|
||||
|
||||
userDir := filepath.Join(exportPath, userID)
|
||||
if err := os.MkdirAll(userDir, 0o700); err != nil {
|
||||
return fmt.Errorf("failed to create directory '%v': %w", userDir, err)
|
||||
}
|
||||
|
||||
if err := usr.DebugDownloadMessages(ctx, userDir, messages, progressCB); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func clientGetMessageIDs(client *goimapclient.Client, mailbox string) (map[string]imap.FlagSet, error) {
|
||||
status, err := client.Select(mailbox, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if status.Messages == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
resCh := make(chan *goimap.Message)
|
||||
|
||||
section, err := goimap.ParseBodySectionName("BODY[HEADER]")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fetchItems := []goimap.FetchItem{"BODY[HEADER]", goimap.FetchFlags}
|
||||
|
||||
seq, err := goimap.ParseSeqSet("1:*")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
go func() {
|
||||
if err := client.Fetch(
|
||||
seq,
|
||||
fetchItems,
|
||||
resCh,
|
||||
); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}()
|
||||
|
||||
messages := iterator.Collect(iterator.Chan(resCh))
|
||||
|
||||
ids := make(map[string]imap.FlagSet, len(messages))
|
||||
|
||||
for i, m := range messages {
|
||||
literal, err := io.ReadAll(m.GetBody(section))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
header, err := rfc822.NewHeader(literal)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse header for msg %v: %w", i, err)
|
||||
}
|
||||
|
||||
internalID, ok := header.GetChecked("X-Pm-Internal-Id")
|
||||
if !ok {
|
||||
logrus.Errorf("Message %v does not have internal id", internalID)
|
||||
continue
|
||||
}
|
||||
|
||||
messageFlags := imap.NewFlagSet(m.Flags...)
|
||||
|
||||
// Recent and Deleted are not part of the proton flag set.
|
||||
messageFlags.RemoveFromSelf("\\Recent")
|
||||
messageFlags.RemoveFromSelf("\\Deleted")
|
||||
|
||||
ids[internalID] = messageFlags
|
||||
}
|
||||
|
||||
return ids, nil
|
||||
}
|
|
@ -0,0 +1,175 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package bridge_test
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ProtonMail/gluon/rfc822"
|
||||
"github.com/ProtonMail/go-proton-api"
|
||||
"github.com/ProtonMail/go-proton-api/server"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/bridge"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/events"
|
||||
go_imap "github.com/emersion/go-imap"
|
||||
"github.com/emersion/go-sasl"
|
||||
"github.com/emersion/go-smtp"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestBridge_HandleDraftsSendFromOtherClient(t *testing.T) {
|
||||
getGluonHeaderID := func(literal []byte) (string, string) {
|
||||
h, err := rfc822.NewHeader(literal)
|
||||
require.NoError(t, err)
|
||||
|
||||
gluonID, ok := h.GetChecked("X-Pm-Gluon-Id")
|
||||
require.True(t, ok)
|
||||
|
||||
externalID, ok := h.GetChecked("Message-Id")
|
||||
require.True(t, ok)
|
||||
|
||||
return gluonID, externalID
|
||||
}
|
||||
|
||||
withEnv(t, func(ctx context.Context, s *server.Server, netCtl *proton.NetCtl, locator bridge.Locator, storeKey []byte) {
|
||||
_, _, err := s.CreateUser("imap", password)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, _, err = s.CreateUser("bar", password)
|
||||
require.NoError(t, err)
|
||||
|
||||
// The initial user should be fully synced.
|
||||
withBridge(ctx, t, s.GetHostURL(), netCtl, locator, storeKey, func(b *bridge.Bridge, _ *bridge.Mocks) {
|
||||
waiter := waitForIMAPServerReady(b)
|
||||
defer waiter.Done()
|
||||
|
||||
syncCh, done := chToType[events.Event, events.SyncFinished](b.GetEvents(events.SyncFinished{}))
|
||||
defer done()
|
||||
|
||||
userID, err := b.LoginFull(ctx, "imap", password, nil, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, userID, (<-syncCh).UserID)
|
||||
waiter.Wait()
|
||||
|
||||
info, err := b.GetUserInfo(userID)
|
||||
require.NoError(t, err)
|
||||
require.True(t, info.State == bridge.Connected)
|
||||
|
||||
client, err := eventuallyDial(fmt.Sprintf("%v:%v", constants.Host, b.GetIMAPPort()))
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, client.Login(info.Addresses[0], string(info.BridgePass)))
|
||||
defer func() { _ = client.Logout() }()
|
||||
|
||||
// Create first draft in client.
|
||||
literal := fmt.Sprintf(`From: %v
|
||||
To: %v
|
||||
Date: Fri, 3 Feb 2023 01:04:32 +0100
|
||||
Subject: Foo
|
||||
|
||||
Hello
|
||||
`, info.Addresses[0], "bar@proton.local")
|
||||
|
||||
require.NoError(t, client.Append("Drafts", nil, time.Now(), strings.NewReader(literal)))
|
||||
// Verify the draft is available in client.
|
||||
require.Eventually(t, func() bool {
|
||||
status, err := client.Status("Drafts", []go_imap.StatusItem{go_imap.StatusMessages})
|
||||
require.NoError(t, err)
|
||||
return status.Messages == 1
|
||||
}, 2*time.Second, time.Second)
|
||||
|
||||
// Retrieve the new literal so we can have the Proton Message ID.
|
||||
messages, err := clientFetch(client, "Drafts")
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(messages))
|
||||
|
||||
newLiteral, err := io.ReadAll(messages[0].GetBody(must(go_imap.ParseBodySectionName("BODY[]"))))
|
||||
require.NoError(t, err)
|
||||
logrus.Info(string(newLiteral))
|
||||
|
||||
newLiteralID, newLiteralExternID := getGluonHeaderID(newLiteral)
|
||||
|
||||
// Modify new literal.
|
||||
newLiteralModified := append(newLiteral, []byte(" world from client2")...) //nolint:gocritic
|
||||
|
||||
func() {
|
||||
smtpClient, err := smtp.Dial(net.JoinHostPort(constants.Host, fmt.Sprint(b.GetSMTPPort())))
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = smtpClient.Close() }()
|
||||
|
||||
// Upgrade to TLS.
|
||||
require.NoError(t, smtpClient.StartTLS(&tls.Config{InsecureSkipVerify: true}))
|
||||
|
||||
// Authorize with SASL PLAIN.
|
||||
require.NoError(t, smtpClient.Auth(sasl.NewPlainClient(
|
||||
info.Addresses[0],
|
||||
info.Addresses[0],
|
||||
string(info.BridgePass)),
|
||||
))
|
||||
|
||||
// Send the message.
|
||||
require.NoError(t, smtpClient.SendMail(
|
||||
info.Addresses[0],
|
||||
[]string{"bar@proton.local"},
|
||||
bytes.NewReader(newLiteralModified),
|
||||
))
|
||||
}()
|
||||
|
||||
// Append message to Sent as the imap client would.
|
||||
require.NoError(t, client.Append("Sent", nil, time.Now(), strings.NewReader(literal)))
|
||||
|
||||
// Verify the sent message gets updated with the new literal.
|
||||
require.Eventually(t, func() bool {
|
||||
// Check if sent message matches the latest draft.
|
||||
messagesClient1, err := clientFetch(client, "Sent", "BODY[TEXT]", "BODY[]")
|
||||
require.NoError(t, err)
|
||||
|
||||
if len(messagesClient1) != 1 {
|
||||
return false
|
||||
}
|
||||
|
||||
sentLiteral, err := io.ReadAll(messagesClient1[0].GetBody(must(go_imap.ParseBodySectionName("BODY[]"))))
|
||||
require.NoError(t, err)
|
||||
|
||||
sentLiteralID, sentLiteralExternID := getGluonHeaderID(sentLiteral)
|
||||
|
||||
sentLiteralText, err := io.ReadAll(messagesClient1[0].GetBody(must(go_imap.ParseBodySectionName("BODY[TEXT]"))))
|
||||
require.NoError(t, err)
|
||||
|
||||
sentLiteralStr := string(sentLiteralText)
|
||||
|
||||
literalMatches := sentLiteralStr == "Hello\r\n world from client2\r\n"
|
||||
|
||||
idIsDifferent := sentLiteralID != newLiteralID
|
||||
|
||||
externIDMatches := sentLiteralExternID == newLiteralExternID
|
||||
|
||||
return literalMatches && idIsDifferent && externIDMatches
|
||||
}, 2*time.Second, time.Second)
|
||||
})
|
||||
}, server.WithMessageDedup())
|
||||
}
|
|
@ -399,6 +399,10 @@ func createNumMessages(ctx context.Context, t *testing.T, c *proton.Client, addr
|
|||
}
|
||||
|
||||
func createMessages(ctx context.Context, t *testing.T, c *proton.Client, addrID, labelID string, messages ...[]byte) []string {
|
||||
return createMessagesWithFlags(ctx, t, c, addrID, labelID, 0, messages...)
|
||||
}
|
||||
|
||||
func createMessagesWithFlags(ctx context.Context, t *testing.T, c *proton.Client, addrID, labelID string, flags proton.MessageFlag, messages ...[]byte) []string {
|
||||
user, err := c.GetUser(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -417,6 +421,13 @@ func createMessages(ctx context.Context, t *testing.T, c *proton.Client, addrID,
|
|||
_, ok := addrKRs[addrID]
|
||||
require.True(t, ok)
|
||||
|
||||
var msgFlags proton.MessageFlag
|
||||
if flags == 0 {
|
||||
msgFlags = proton.MessageFlagReceived
|
||||
} else {
|
||||
msgFlags = flags
|
||||
}
|
||||
|
||||
str, err := c.ImportMessages(
|
||||
ctx,
|
||||
addrKRs[addrID],
|
||||
|
@ -427,7 +438,7 @@ func createMessages(ctx context.Context, t *testing.T, c *proton.Client, addrID,
|
|||
Metadata: proton.ImportMetadata{
|
||||
AddressID: addrID,
|
||||
LabelIDs: []string{labelID},
|
||||
Flags: proton.MessageFlagReceived,
|
||||
Flags: msgFlags,
|
||||
},
|
||||
Message: message,
|
||||
}
|
||||
|
|
|
@ -70,9 +70,11 @@ func prepareMobileConfig(
|
|||
password []byte,
|
||||
) *mobileconfig.Config {
|
||||
return &mobileconfig.Config{
|
||||
DisplayName: username,
|
||||
EmailAddress: addresses,
|
||||
Identifier: "protonmail " + username + strconv.FormatInt(time.Now().Unix(), 10),
|
||||
DisplayName: username,
|
||||
EmailAddress: addresses,
|
||||
AccountName: username,
|
||||
AccountDescription: username,
|
||||
Identifier: "protonmail " + username + strconv.FormatInt(time.Now().Unix(), 10),
|
||||
IMAP: &mobileconfig.IMAP{
|
||||
Hostname: hostname,
|
||||
Port: imapPort,
|
||||
|
|
|
@ -0,0 +1,4 @@
|
|||
# The following fix an issue happening using LLDB with OpenSSL 3.1 on ARM64 architecture. (GODT-2680)
|
||||
# WARNING: this file is ignored if you do not enable reading lldb config from cwd in ~/.lldbinit (`settings set target.load-cwd-lldbinit true`)
|
||||
settings set platform.plugin.darwin.ignored-exceptions EXC_BAD_INSTRUCTION
|
||||
process handle SIGILL -n false -p true -s false
|
|
@ -0,0 +1,4 @@
|
|||
# The following fix an issue happening using LLDB with OpenSSL 3.1 on ARM64 architecture. (GODT-2680)
|
||||
# WARNING: this file is ignored if you do not enable reading lldb config from cwd in ~/.lldbinit (`settings set target.load-cwd-lldbinit true`)
|
||||
settings set platform.plugin.darwin.ignored-exceptions EXC_BAD_INSTRUCTION
|
||||
process handle SIGILL -n false -p true -s false
|
|
@ -117,7 +117,27 @@ void AppController::restart(bool isCrashing) {
|
|||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] launcher The launcher.
|
||||
/// \param[in] args The launcher arguments.
|
||||
//****************************************************************************************************************************************************
|
||||
void AppController::setLauncherArgs(const QString &launcher, const QStringList &args) {
|
||||
launcher_ = launcher;
|
||||
launcherArgs_ = args;
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] sessionID The sessionID.
|
||||
//****************************************************************************************************************************************************
|
||||
void AppController::setSessionID(const QString &sessionID) {
|
||||
sessionID_ = sessionID;
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \return The sessionID.
|
||||
//****************************************************************************************************************************************************
|
||||
QString AppController::sessionID() {
|
||||
return sessionID_;
|
||||
}
|
||||
|
|
|
@ -37,7 +37,7 @@ class Exception;
|
|||
/// \brief App controller class.
|
||||
//****************************************************************************************************************************************************
|
||||
class AppController : public QObject {
|
||||
Q_OBJECT
|
||||
Q_OBJECT
|
||||
friend AppController &app();
|
||||
|
||||
public: // member functions.
|
||||
|
@ -52,10 +52,12 @@ public: // member functions.
|
|||
std::unique_ptr<bridgepp::Overseer> &bridgeOverseer() { return bridgeOverseer_; }; ///< Returns a reference the bridge overseer
|
||||
bridgepp::ProcessMonitor *bridgeMonitor() const; ///< Return the bridge worker.
|
||||
Settings &settings();; ///< Return the application settings.
|
||||
void setLauncherArgs(const QString &launcher, const QStringList &args);
|
||||
void setLauncherArgs(const QString &launcher, const QStringList &args); ///< Set the launcher arguments.
|
||||
void setSessionID(QString const &sessionID); ///< Set the sessionID.
|
||||
QString sessionID(); ///< Get the sessionID.
|
||||
|
||||
public slots:
|
||||
void onFatalError(bridgepp::Exception const& e); ///< Handle fatal errors.
|
||||
void onFatalError(bridgepp::Exception const &e); ///< Handle fatal errors.
|
||||
|
||||
private: // member functions
|
||||
AppController(); ///< Default constructor.
|
||||
|
@ -67,8 +69,9 @@ private: // data members
|
|||
std::unique_ptr<bridgepp::Log> log_; ///< The log.
|
||||
std::unique_ptr<bridgepp::Overseer> bridgeOverseer_; ///< The overseer for the bridge monitor worker.
|
||||
std::unique_ptr<Settings> settings_; ///< The application settings.
|
||||
QString launcher_;
|
||||
QStringList launcherArgs_;
|
||||
QString launcher_; ///< The launcher.
|
||||
QStringList launcherArgs_; ///< The launcher arguments.
|
||||
QString sessionID_; ///< The sessionID.
|
||||
};
|
||||
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
#include "Pch.h"
|
||||
#include "CommandLine.h"
|
||||
#include "Settings.h"
|
||||
#include <bridgepp/SessionID/SessionID.h>
|
||||
|
||||
|
||||
using namespace bridgepp;
|
||||
|
@ -142,5 +143,14 @@ CommandLineOptions parseCommandLine(int argc, char *argv[]) {
|
|||
|
||||
options.logLevel = parseLogLevel(argc, argv);
|
||||
|
||||
QString sessionID = parseGoCLIStringArgument(argc, argv, { "session-id" });
|
||||
if (sessionID.isEmpty()) {
|
||||
// The session ID was not passed to us on the command-line -> create one and add to the command-line for bridge
|
||||
sessionID = newSessionID();
|
||||
options.bridgeArgs.append("--session-id");
|
||||
options.bridgeArgs.append(sessionID);
|
||||
}
|
||||
app().setSessionID(sessionID);
|
||||
|
||||
return options;
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
#include "LogUtils.h"
|
||||
#include "BuildConfig.h"
|
||||
#include <bridgepp/Log/LogUtils.h>
|
||||
#include <bridgepp/BridgeUtils.h>
|
||||
|
||||
|
||||
using namespace bridgepp;
|
||||
|
@ -33,15 +32,10 @@ Log &initLog() {
|
|||
log.registerAsQtMessageHandler();
|
||||
log.setEchoInConsole(true);
|
||||
|
||||
// remove old gui log files
|
||||
QDir const logsDir(userLogsDir());
|
||||
for (QFileInfo const fileInfo: logsDir.entryInfoList({ "gui_v*.log" }, QDir::Filter::Files)) { // entryInfolist apparently only support wildcards, not regex.
|
||||
QFile(fileInfo.absoluteFilePath()).remove();
|
||||
}
|
||||
|
||||
// create new GUI log file
|
||||
QString error;
|
||||
if (!log.startWritingToFile(logsDir.absoluteFilePath(QString("gui_v%1_%2.log").arg(PROJECT_VER).arg(QDateTime::currentSecsSinceEpoch())), &error)) {
|
||||
if (!log.startWritingToFile(QDir(userLogsDir()).absoluteFilePath(QString("%1_gui_000_v%2_%3.log").arg(app().sessionID(),
|
||||
PROJECT_VER, PROJECT_TAG)), &error)) {
|
||||
log.error(error);
|
||||
}
|
||||
|
||||
|
|
|
@ -61,7 +61,7 @@ void QMLBackend::init(GRPCConfig const &serviceConfig) {
|
|||
app().grpc().setLog(&log);
|
||||
this->connectGrpcEvents();
|
||||
|
||||
app().grpc().connectToServer(bridgepp::userConfigDir(), serviceConfig, app().bridgeMonitor());
|
||||
app().grpc().connectToServer(app().sessionID(), bridgepp::userConfigDir(), serviceConfig, app().bridgeMonitor());
|
||||
app().log().info("Connected to backend via gRPC service.");
|
||||
|
||||
QString bridgeVer;
|
||||
|
@ -109,6 +109,12 @@ UserList const &QMLBackend::users() const {
|
|||
return *users_;
|
||||
}
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \return the if bridge considers internet is on.
|
||||
//****************************************************************************************************************************************************
|
||||
bool QMLBackend::isInternetOn() const {
|
||||
return isInternetOn_;
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
|
@ -680,7 +686,7 @@ void QMLBackend::login(QString const &username, QString const &password) const {
|
|||
HANDLE_EXCEPTION(
|
||||
if (username.compare("coco@bandicoot", Qt::CaseInsensitive) == 0) {
|
||||
throw Exception("User requested bridge-gui to crash by trying to log as coco@bandicoot",
|
||||
"This error exists for test purposes and should be ignored.", __func__, tailOfLatestBridgeLog());
|
||||
"This error exists for test purposes and should be ignored.", __func__, tailOfLatestBridgeLog(app().sessionID()));
|
||||
}
|
||||
app().grpc().login(username, password);
|
||||
)
|
||||
|
@ -914,7 +920,6 @@ void QMLBackend::sendBadEventUserFeedback(QString const &userID, bool doResync)
|
|||
if (!badEventDisplayQueue_.isEmpty()) {
|
||||
// we introduce a small delay here, so that the user notices the dialog disappear and pops up again.
|
||||
QTimer::singleShot(500, [&]() { this->displayBadEventDialog(badEventDisplayQueue_.front()); });
|
||||
|
||||
}
|
||||
)
|
||||
}
|
||||
|
@ -989,6 +994,25 @@ void QMLBackend::setUpdateTrayIcon(QString const &stateString, QString const &st
|
|||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] isOn Does bridge consider internet as on.
|
||||
//****************************************************************************************************************************************************
|
||||
void QMLBackend::internetStatusChanged(bool isOn) {
|
||||
HANDLE_EXCEPTION(
|
||||
if (isInternetOn_ == isOn) {
|
||||
return;
|
||||
}
|
||||
|
||||
isInternetOn_ = isOn;
|
||||
if (isOn) {
|
||||
emit internetOn();
|
||||
} else {
|
||||
emit internetOff();
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] imapPort The IMAP port.
|
||||
/// \param[in] smtpPort The SMTP port.
|
||||
|
@ -1152,7 +1176,7 @@ void QMLBackend::connectGrpcEvents() {
|
|||
GRPCClient *client = &app().grpc();
|
||||
|
||||
// app events
|
||||
connect(client, &GRPCClient::internetStatus, this, [&](bool isOn) { if (isOn) { emit internetOn(); } else { emit internetOff(); }});
|
||||
connect(client, &GRPCClient::internetStatus, this, &QMLBackend::internetStatusChanged);
|
||||
connect(client, &GRPCClient::toggleAutostartFinished, this, &QMLBackend::toggleAutostartFinished);
|
||||
connect(client, &GRPCClient::resetFinished, this, &QMLBackend::onResetFinished);
|
||||
connect(client, &GRPCClient::reportBugFinished, this, &QMLBackend::reportBugFinished);
|
||||
|
|
|
@ -45,6 +45,7 @@ public: // member functions.
|
|||
void init(GRPCConfig const &serviceConfig); ///< Initialize the backend.
|
||||
bool waitForEventStreamReaderToFinish(qint32 timeoutMs); ///< Wait for the event stream reader to finish.
|
||||
UserList const& users() const; ///< Return the list of users
|
||||
bool isInternetOn() const; ///< Check if bridge considers internet as on.
|
||||
void showMainWindow(QString const &reason); ///< Show the main window.
|
||||
void showHelp(QString const &reason); ///< Show the help page.
|
||||
void showSettings(QString const &reason); ///< Show the settings page.
|
||||
|
@ -89,7 +90,6 @@ public: // Qt/QML properties. Note that the NOTIFY-er signal is required even fo
|
|||
Q_PROPERTY(UserList *users MEMBER users_ NOTIFY usersChanged)
|
||||
Q_PROPERTY(bool dockIconVisible READ dockIconVisible WRITE setDockIconVisible NOTIFY dockIconVisibleChanged)
|
||||
|
||||
|
||||
// Qt Property system setters & getters.
|
||||
bool showOnStartup() const; ///< Getter for the 'showOnStartup' property.
|
||||
void setShowSplashScreen(bool show); ///< Setter for the 'showSplashScreen' property.
|
||||
|
@ -198,6 +198,7 @@ public slots: // slots for functions that need to be processed locally.
|
|||
void setUpdateTrayIcon(QString const& stateString, QString const &statusIcon); ///< Set the tray icon to 'update' state.
|
||||
|
||||
public slots: // slot for signals received from gRPC that need transformation instead of simple forwarding
|
||||
void internetStatusChanged(bool isOn); ///< Check if bridge considers internet as on.
|
||||
void onMailServerSettingsChanged(int imapPort, int smtpPort, bool useSSLForIMAP, bool useSSLForSMTP); ///< Slot for the ConnectionModeChanged gRPC event.
|
||||
void onGenericError(bridgepp::ErrorInfo const &info); ///< Slot for generic errors received from the gRPC service.
|
||||
void onLoginFinished(QString const &userID, bool wasSignedOut); ///< Slot for LoginFinished gRPC event.
|
||||
|
@ -280,8 +281,9 @@ private: // data members
|
|||
int smtpPort_ { 0 }; ///< The cached value for the SMTP port.
|
||||
bool useSSLForIMAP_ { false }; ///< The cached value for useSSLForIMAP.
|
||||
bool useSSLForSMTP_ { false }; ///< The cached value for useSSLForSMTP.
|
||||
bool isInternetOn_ { true }; ///< Does bridge consider internet as on?
|
||||
QList<QString> badEventDisplayQueue_; ///< THe queue for displaying 'bad event feedback request dialog'.
|
||||
std::unique_ptr<TrayIcon> trayIcon_;
|
||||
std::unique_ptr<TrayIcon> trayIcon_; ///< The tray icon for the application.
|
||||
friend class AppController;
|
||||
};
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
<file>qml/icons/ic-card-identity.svg</file>
|
||||
<file>qml/icons/ic-check.svg</file>
|
||||
<file>qml/icons/ic-chevron-down.svg</file>
|
||||
<file>qml/icons/ic-chevron-right.svg</file>
|
||||
<file>qml/icons/ic-chevron-up.svg</file>
|
||||
<file>qml/icons/ic-cog-wheel.svg</file>
|
||||
<file>qml/icons/ic-connected.svg</file>
|
||||
|
|
|
@ -348,6 +348,7 @@ void TrayIcon::refreshContextMenu() {
|
|||
return;
|
||||
}
|
||||
|
||||
bool const internetOn = app().backend().isInternetOn();
|
||||
menu_->clear();
|
||||
menu_->addAction(statusIcon_, stateString_, []() {app().backend().showMainWindow("tray menu status clicked");});
|
||||
menu_->addSeparator();
|
||||
|
@ -359,7 +360,9 @@ void TrayIcon::refreshContextMenu() {
|
|||
User const &user = *users.get(i);
|
||||
UserState const state = user.state();
|
||||
auto action = new QAction(user.primaryEmailOrUsername());
|
||||
action->setIcon((UserState::Connected == state) ? greenDot_ : (UserState::Locked == state ? orangeDot_ : greyDot_));
|
||||
if (internetOn) {
|
||||
action->setIcon((UserState::Connected == state) ? greenDot_ : (UserState::Locked == state ? orangeDot_ : greyDot_));
|
||||
}
|
||||
action->setData(user.id());
|
||||
connect(action, &QAction::triggered, this, &TrayIcon::onUserClicked);
|
||||
if ((i < 10) && onMac) {
|
||||
|
|
|
@ -305,22 +305,23 @@ int main(int argc, char *argv[]) {
|
|||
// these outputs and output them on the command-line.
|
||||
log.info(QString("New Sentry reporter - id: %1.").arg(getProtectedHostname()));
|
||||
|
||||
QString bridgeexec;
|
||||
QString const &sessionID = app().sessionID();
|
||||
QString bridgeExe;
|
||||
if (!cliOptions.attach) {
|
||||
if (isBridgeRunning()) {
|
||||
throw Exception("An orphan instance of bridge is already running. Please terminate it and relaunch the application.",
|
||||
QString(), __FUNCTION__, tailOfLatestBridgeLog());
|
||||
QString(), __FUNCTION__, tailOfLatestBridgeLog(sessionID));
|
||||
}
|
||||
|
||||
// before launching bridge, we remove any trailing service config file, because we need to make sure we get a newly generated one.
|
||||
FocusGRPCClient::removeServiceConfigFile(configDir);
|
||||
GRPCClient::removeServiceConfigFile(configDir);
|
||||
bridgeexec = launchBridge(cliOptions.bridgeArgs);
|
||||
bridgeExe = launchBridge(cliOptions.bridgeArgs);
|
||||
}
|
||||
|
||||
log.info(QString("Retrieving gRPC service configuration from '%1'").arg(QDir::toNativeSeparators(grpcServerConfigPath(configDir))));
|
||||
app().backend().init(GRPCClient::waitAndRetrieveServiceConfig(configDir, cliOptions.attach ? 0 : grpcServiceConfigWaitDelayMs,
|
||||
app().bridgeMonitor()));
|
||||
app().backend().init(GRPCClient::waitAndRetrieveServiceConfig(sessionID, configDir,
|
||||
cliOptions.attach ? 0 : grpcServiceConfigWaitDelayMs, app().bridgeMonitor()));
|
||||
if (!cliOptions.attach) {
|
||||
GRPCClient::removeServiceConfigFile(configDir);
|
||||
}
|
||||
|
@ -378,9 +379,9 @@ int main(int argc, char *argv[]) {
|
|||
QStringList args = cliOptions.bridgeGuiArgs;
|
||||
args.append(waitFlag);
|
||||
args.append(mainexec);
|
||||
if (!bridgeexec.isEmpty()) {
|
||||
if (!bridgeExe.isEmpty()) {
|
||||
args.append(waitFlag);
|
||||
args.append(bridgeexec);
|
||||
args.append(bridgeExe);
|
||||
}
|
||||
app().setLauncherArgs(cliOptions.launcher, args);
|
||||
result = QGuiApplication::exec();
|
||||
|
|
|
@ -90,9 +90,9 @@ SettingsView {
|
|||
|
||||
RowLayout {
|
||||
ColorImage {
|
||||
Layout.alignment: Qt.AlignTop
|
||||
Layout.alignment: Qt.AlignCenter
|
||||
|
||||
source: root._isAdvancedShown ? "/qml/icons/ic-chevron-up.svg" : "/qml/icons/ic-chevron-down.svg"
|
||||
source: root._isAdvancedShown ? "/qml/icons/ic-chevron-down.svg" : "/qml/icons/ic-chevron-right.svg"
|
||||
color: root.colorScheme.interaction_norm
|
||||
height: root.colorScheme.body_font_size
|
||||
sourceSize.height: root.colorScheme.body_font_size
|
||||
|
|
|
@ -0,0 +1,9 @@
|
|||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
||||
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
|
||||
<svg width="100%" height="100%" viewBox="0 0 16 16" version="1.1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" xml:space="preserve" xmlns:serif="http://www.serif.com/" style="fill-rule:evenodd;clip-rule:evenodd;stroke-linejoin:round;stroke-miterlimit:2;">
|
||||
<g transform="matrix(6.12323e-17,-1,1,6.12323e-17,-0.800005,16.8)">
|
||||
<g id="ic-chevron-down">
|
||||
<path id="icon" d="M2.3,6.3L8,12L13.7,6.3L13,5.6L8,10.58L3,5.6L2.3,6.3Z" style="fill:rgb(23,24,28);"/>
|
||||
</g>
|
||||
</g>
|
||||
</svg>
|
After Width: | Height: | Size: 692 B |
|
@ -32,9 +32,9 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
|
|||
|
||||
if (NOT DEFINED BRIDGE_APP_VERSION)
|
||||
message(FATAL_ERROR "BRIDGE_APP_VERSION is not defined.")
|
||||
else()
|
||||
else ()
|
||||
message(STATUS "Bridge version is ${BRIDGE_APP_VERSION}")
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
|
||||
#****************************************************************************************************************************************************
|
||||
|
@ -148,6 +148,7 @@ add_library(bridgepp
|
|||
bridgepp/Log/Log.h bridgepp/Log/Log.cpp
|
||||
bridgepp/Log/LogUtils.h bridgepp/Log/LogUtils.cpp
|
||||
bridgepp/ProcessMonitor.cpp bridgepp/ProcessMonitor.h
|
||||
bridgepp/SessionID/SessionID.cpp bridgepp/SessionID/SessionID.h
|
||||
bridgepp/User/User.cpp bridgepp/User/User.h
|
||||
bridgepp/Worker/Worker.h bridgepp/Worker/Overseer.h bridgepp/Worker/Overseer.cpp)
|
||||
|
||||
|
@ -167,7 +168,7 @@ target_precompile_headers(bridgepp PRIVATE Pch.h)
|
|||
|
||||
if (CMAKE_VERSION VERSION_GREATER_EQUAL "3.24.0")
|
||||
cmake_policy(SET CMP0135 NEW) # avoid warning DOWNLOAD_EXTRACT_TIMESTAMP
|
||||
endif()
|
||||
endif ()
|
||||
|
||||
include(FetchContent)
|
||||
FetchContent_Declare(
|
||||
|
@ -188,7 +189,9 @@ enable_testing()
|
|||
add_executable(bridgepp-test EXCLUDE_FROM_ALL
|
||||
Test/TestBridgeUtils.cpp
|
||||
Test/TestException.cpp
|
||||
Test/TestWorker.cpp Test/TestWorker.h)
|
||||
Test/TestSessionID.cpp
|
||||
Test/TestWorker.cpp Test/TestWorker.h
|
||||
)
|
||||
add_dependencies(bridgepp-test bridgepp)
|
||||
target_precompile_headers(bridgepp-test PRIVATE Pch.h)
|
||||
target_link_libraries(bridgepp-test
|
||||
|
|
|
@ -0,0 +1,36 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
#include "QtCore/qdatetime.h"
|
||||
#include <gtest/gtest.h>
|
||||
#include <bridgepp/SessionID/SessionID.h>
|
||||
|
||||
|
||||
using namespace bridgepp;
|
||||
|
||||
|
||||
TEST(SessionID, SessionID) {
|
||||
QString const sessionID = newSessionID();
|
||||
EXPECT_TRUE(sessionID.size() > 0);
|
||||
|
||||
EXPECT_FALSE(sessionIDToDateTime("invalidSessionID").isValid());
|
||||
|
||||
QDateTime const dateTime = sessionIDToDateTime(sessionID);
|
||||
EXPECT_TRUE(dateTime.isValid());
|
||||
EXPECT_TRUE(qAbs(dateTime.secsTo(QDateTime::currentDateTime())) < 5);
|
||||
}
|
|
@ -1,128 +0,0 @@
|
|||
// Generated by the gRPC C++ plugin.
|
||||
// If you make any local change, they will be lost.
|
||||
// source: focus.proto
|
||||
|
||||
#include "focus.pb.h"
|
||||
#include "focus.grpc.pb.h"
|
||||
|
||||
#include <functional>
|
||||
#include <grpcpp/support/async_stream.h>
|
||||
#include <grpcpp/support/async_unary_call.h>
|
||||
#include <grpcpp/impl/channel_interface.h>
|
||||
#include <grpcpp/impl/client_unary_call.h>
|
||||
#include <grpcpp/support/client_callback.h>
|
||||
#include <grpcpp/support/message_allocator.h>
|
||||
#include <grpcpp/support/method_handler.h>
|
||||
#include <grpcpp/impl/rpc_service_method.h>
|
||||
#include <grpcpp/support/server_callback.h>
|
||||
#include <grpcpp/impl/codegen/server_callback_handlers.h>
|
||||
#include <grpcpp/server_context.h>
|
||||
#include <grpcpp/impl/service_type.h>
|
||||
#include <grpcpp/support/sync_stream.h>
|
||||
namespace focus {
|
||||
|
||||
static const char* Focus_method_names[] = {
|
||||
"/focus.Focus/Raise",
|
||||
"/focus.Focus/Version",
|
||||
};
|
||||
|
||||
std::unique_ptr< Focus::Stub> Focus::NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options) {
|
||||
(void)options;
|
||||
std::unique_ptr< Focus::Stub> stub(new Focus::Stub(channel, options));
|
||||
return stub;
|
||||
}
|
||||
|
||||
Focus::Stub::Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options)
|
||||
: channel_(channel), rpcmethod_Raise_(Focus_method_names[0], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
, rpcmethod_Version_(Focus_method_names[1], options.suffix_for_stats(),::grpc::internal::RpcMethod::NORMAL_RPC, channel)
|
||||
{}
|
||||
|
||||
::grpc::Status Focus::Stub::Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::google::protobuf::Empty* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall< ::google::protobuf::StringValue, ::google::protobuf::Empty, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_Raise_, context, request, response);
|
||||
}
|
||||
|
||||
void Focus::Stub::async::Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc::internal::CallbackUnaryCall< ::google::protobuf::StringValue, ::google::protobuf::Empty, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Raise_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void Focus::Stub::async::Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response, ::grpc::ClientUnaryReactor* reactor) {
|
||||
::grpc::internal::ClientCallbackUnaryFactory::Create< ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Raise_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>* Focus::Stub::PrepareAsyncRaiseRaw(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc::internal::ClientAsyncResponseReaderHelper::Create< ::google::protobuf::Empty, ::google::protobuf::StringValue, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), cq, rpcmethod_Raise_, context, request);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>* Focus::Stub::AsyncRaiseRaw(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) {
|
||||
auto* result =
|
||||
this->PrepareAsyncRaiseRaw(context, request, cq);
|
||||
result->StartCall();
|
||||
return result;
|
||||
}
|
||||
|
||||
::grpc::Status Focus::Stub::Version(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::focus::VersionResponse* response) {
|
||||
return ::grpc::internal::BlockingUnaryCall< ::google::protobuf::Empty, ::focus::VersionResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), rpcmethod_Version_, context, request, response);
|
||||
}
|
||||
|
||||
void Focus::Stub::async::Version(::grpc::ClientContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response, std::function<void(::grpc::Status)> f) {
|
||||
::grpc::internal::CallbackUnaryCall< ::google::protobuf::Empty, ::focus::VersionResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Version_, context, request, response, std::move(f));
|
||||
}
|
||||
|
||||
void Focus::Stub::async::Version(::grpc::ClientContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response, ::grpc::ClientUnaryReactor* reactor) {
|
||||
::grpc::internal::ClientCallbackUnaryFactory::Create< ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(stub_->channel_.get(), stub_->rpcmethod_Version_, context, request, response, reactor);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>* Focus::Stub::PrepareAsyncVersionRaw(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) {
|
||||
return ::grpc::internal::ClientAsyncResponseReaderHelper::Create< ::focus::VersionResponse, ::google::protobuf::Empty, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(channel_.get(), cq, rpcmethod_Version_, context, request);
|
||||
}
|
||||
|
||||
::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>* Focus::Stub::AsyncVersionRaw(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) {
|
||||
auto* result =
|
||||
this->PrepareAsyncVersionRaw(context, request, cq);
|
||||
result->StartCall();
|
||||
return result;
|
||||
}
|
||||
|
||||
Focus::Service::Service() {
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
Focus_method_names[0],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< Focus::Service, ::google::protobuf::StringValue, ::google::protobuf::Empty, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(
|
||||
[](Focus::Service* service,
|
||||
::grpc::ServerContext* ctx,
|
||||
const ::google::protobuf::StringValue* req,
|
||||
::google::protobuf::Empty* resp) {
|
||||
return service->Raise(ctx, req, resp);
|
||||
}, this)));
|
||||
AddMethod(new ::grpc::internal::RpcServiceMethod(
|
||||
Focus_method_names[1],
|
||||
::grpc::internal::RpcMethod::NORMAL_RPC,
|
||||
new ::grpc::internal::RpcMethodHandler< Focus::Service, ::google::protobuf::Empty, ::focus::VersionResponse, ::grpc::protobuf::MessageLite, ::grpc::protobuf::MessageLite>(
|
||||
[](Focus::Service* service,
|
||||
::grpc::ServerContext* ctx,
|
||||
const ::google::protobuf::Empty* req,
|
||||
::focus::VersionResponse* resp) {
|
||||
return service->Version(ctx, req, resp);
|
||||
}, this)));
|
||||
}
|
||||
|
||||
Focus::Service::~Service() {
|
||||
}
|
||||
|
||||
::grpc::Status Focus::Service::Raise(::grpc::ServerContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
::grpc::Status Focus::Service::Version(::grpc::ServerContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response) {
|
||||
(void) context;
|
||||
(void) request;
|
||||
(void) response;
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
|
||||
|
||||
} // namespace focus
|
||||
|
|
@ -1,418 +0,0 @@
|
|||
// Generated by the gRPC C++ plugin.
|
||||
// If you make any local change, they will be lost.
|
||||
// source: focus.proto
|
||||
// Original file comments:
|
||||
// Copyright (c) 2022 Proton Technologies AG
|
||||
//
|
||||
// This file is part of ProtonMail Bridge.
|
||||
//
|
||||
// ProtonMail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// ProtonMail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with ProtonMail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
//
|
||||
#ifndef GRPC_focus_2eproto__INCLUDED
|
||||
#define GRPC_focus_2eproto__INCLUDED
|
||||
|
||||
#include "focus.pb.h"
|
||||
|
||||
#include <functional>
|
||||
#include <grpcpp/generic/async_generic_service.h>
|
||||
#include <grpcpp/support/async_stream.h>
|
||||
#include <grpcpp/support/async_unary_call.h>
|
||||
#include <grpcpp/support/client_callback.h>
|
||||
#include <grpcpp/client_context.h>
|
||||
#include <grpcpp/completion_queue.h>
|
||||
#include <grpcpp/support/message_allocator.h>
|
||||
#include <grpcpp/support/method_handler.h>
|
||||
#include <grpcpp/impl/codegen/proto_utils.h>
|
||||
#include <grpcpp/impl/rpc_method.h>
|
||||
#include <grpcpp/support/server_callback.h>
|
||||
#include <grpcpp/impl/codegen/server_callback_handlers.h>
|
||||
#include <grpcpp/server_context.h>
|
||||
#include <grpcpp/impl/service_type.h>
|
||||
#include <grpcpp/impl/codegen/status.h>
|
||||
#include <grpcpp/support/stub_options.h>
|
||||
#include <grpcpp/support/sync_stream.h>
|
||||
|
||||
namespace focus {
|
||||
|
||||
// **********************************************************************************************************************
|
||||
// Service Declaration
|
||||
// **********************************************************************************************************************≠––
|
||||
class Focus final {
|
||||
public:
|
||||
static constexpr char const* service_full_name() {
|
||||
return "focus.Focus";
|
||||
}
|
||||
class StubInterface {
|
||||
public:
|
||||
virtual ~StubInterface() {}
|
||||
virtual ::grpc::Status Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::google::protobuf::Empty* response) = 0;
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::google::protobuf::Empty>> AsyncRaise(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::google::protobuf::Empty>>(AsyncRaiseRaw(context, request, cq));
|
||||
}
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::google::protobuf::Empty>> PrepareAsyncRaise(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::google::protobuf::Empty>>(PrepareAsyncRaiseRaw(context, request, cq));
|
||||
}
|
||||
virtual ::grpc::Status Version(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::focus::VersionResponse* response) = 0;
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::focus::VersionResponse>> AsyncVersion(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::focus::VersionResponse>>(AsyncVersionRaw(context, request, cq));
|
||||
}
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::focus::VersionResponse>> PrepareAsyncVersion(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReaderInterface< ::focus::VersionResponse>>(PrepareAsyncVersionRaw(context, request, cq));
|
||||
}
|
||||
class async_interface {
|
||||
public:
|
||||
virtual ~async_interface() {}
|
||||
virtual void Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response, std::function<void(::grpc::Status)>) = 0;
|
||||
virtual void Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response, ::grpc::ClientUnaryReactor* reactor) = 0;
|
||||
virtual void Version(::grpc::ClientContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response, std::function<void(::grpc::Status)>) = 0;
|
||||
virtual void Version(::grpc::ClientContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response, ::grpc::ClientUnaryReactor* reactor) = 0;
|
||||
};
|
||||
typedef class async_interface experimental_async_interface;
|
||||
virtual class async_interface* async() { return nullptr; }
|
||||
class async_interface* experimental_async() { return async(); }
|
||||
private:
|
||||
virtual ::grpc::ClientAsyncResponseReaderInterface< ::google::protobuf::Empty>* AsyncRaiseRaw(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) = 0;
|
||||
virtual ::grpc::ClientAsyncResponseReaderInterface< ::google::protobuf::Empty>* PrepareAsyncRaiseRaw(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) = 0;
|
||||
virtual ::grpc::ClientAsyncResponseReaderInterface< ::focus::VersionResponse>* AsyncVersionRaw(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) = 0;
|
||||
virtual ::grpc::ClientAsyncResponseReaderInterface< ::focus::VersionResponse>* PrepareAsyncVersionRaw(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) = 0;
|
||||
};
|
||||
class Stub final : public StubInterface {
|
||||
public:
|
||||
Stub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
|
||||
::grpc::Status Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::google::protobuf::Empty* response) override;
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>> AsyncRaise(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>>(AsyncRaiseRaw(context, request, cq));
|
||||
}
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>> PrepareAsyncRaise(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>>(PrepareAsyncRaiseRaw(context, request, cq));
|
||||
}
|
||||
::grpc::Status Version(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::focus::VersionResponse* response) override;
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>> AsyncVersion(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>>(AsyncVersionRaw(context, request, cq));
|
||||
}
|
||||
std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>> PrepareAsyncVersion(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) {
|
||||
return std::unique_ptr< ::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>>(PrepareAsyncVersionRaw(context, request, cq));
|
||||
}
|
||||
class async final :
|
||||
public StubInterface::async_interface {
|
||||
public:
|
||||
void Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response, std::function<void(::grpc::Status)>) override;
|
||||
void Raise(::grpc::ClientContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response, ::grpc::ClientUnaryReactor* reactor) override;
|
||||
void Version(::grpc::ClientContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response, std::function<void(::grpc::Status)>) override;
|
||||
void Version(::grpc::ClientContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response, ::grpc::ClientUnaryReactor* reactor) override;
|
||||
private:
|
||||
friend class Stub;
|
||||
explicit async(Stub* stub): stub_(stub) { }
|
||||
Stub* stub() { return stub_; }
|
||||
Stub* stub_;
|
||||
};
|
||||
class async* async() override { return &async_stub_; }
|
||||
|
||||
private:
|
||||
std::shared_ptr< ::grpc::ChannelInterface> channel_;
|
||||
class async async_stub_{this};
|
||||
::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>* AsyncRaiseRaw(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) override;
|
||||
::grpc::ClientAsyncResponseReader< ::google::protobuf::Empty>* PrepareAsyncRaiseRaw(::grpc::ClientContext* context, const ::google::protobuf::StringValue& request, ::grpc::CompletionQueue* cq) override;
|
||||
::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>* AsyncVersionRaw(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) override;
|
||||
::grpc::ClientAsyncResponseReader< ::focus::VersionResponse>* PrepareAsyncVersionRaw(::grpc::ClientContext* context, const ::google::protobuf::Empty& request, ::grpc::CompletionQueue* cq) override;
|
||||
const ::grpc::internal::RpcMethod rpcmethod_Raise_;
|
||||
const ::grpc::internal::RpcMethod rpcmethod_Version_;
|
||||
};
|
||||
static std::unique_ptr<Stub> NewStub(const std::shared_ptr< ::grpc::ChannelInterface>& channel, const ::grpc::StubOptions& options = ::grpc::StubOptions());
|
||||
|
||||
class Service : public ::grpc::Service {
|
||||
public:
|
||||
Service();
|
||||
virtual ~Service();
|
||||
virtual ::grpc::Status Raise(::grpc::ServerContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response);
|
||||
virtual ::grpc::Status Version(::grpc::ServerContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response);
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithAsyncMethod_Raise : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithAsyncMethod_Raise() {
|
||||
::grpc::Service::MarkMethodAsync(0);
|
||||
}
|
||||
~WithAsyncMethod_Raise() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Raise(::grpc::ServerContext* /*context*/, const ::google::protobuf::StringValue* /*request*/, ::google::protobuf::Empty* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
void RequestRaise(::grpc::ServerContext* context, ::google::protobuf::StringValue* request, ::grpc::ServerAsyncResponseWriter< ::google::protobuf::Empty>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
|
||||
::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag);
|
||||
}
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithAsyncMethod_Version : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithAsyncMethod_Version() {
|
||||
::grpc::Service::MarkMethodAsync(1);
|
||||
}
|
||||
~WithAsyncMethod_Version() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Version(::grpc::ServerContext* /*context*/, const ::google::protobuf::Empty* /*request*/, ::focus::VersionResponse* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
void RequestVersion(::grpc::ServerContext* context, ::google::protobuf::Empty* request, ::grpc::ServerAsyncResponseWriter< ::focus::VersionResponse>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
|
||||
::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag);
|
||||
}
|
||||
};
|
||||
typedef WithAsyncMethod_Raise<WithAsyncMethod_Version<Service > > AsyncService;
|
||||
template <class BaseClass>
|
||||
class WithCallbackMethod_Raise : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithCallbackMethod_Raise() {
|
||||
::grpc::Service::MarkMethodCallback(0,
|
||||
new ::grpc::internal::CallbackUnaryHandler< ::google::protobuf::StringValue, ::google::protobuf::Empty>(
|
||||
[this](
|
||||
::grpc::CallbackServerContext* context, const ::google::protobuf::StringValue* request, ::google::protobuf::Empty* response) { return this->Raise(context, request, response); }));}
|
||||
void SetMessageAllocatorFor_Raise(
|
||||
::grpc::MessageAllocator< ::google::protobuf::StringValue, ::google::protobuf::Empty>* allocator) {
|
||||
::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(0);
|
||||
static_cast<::grpc::internal::CallbackUnaryHandler< ::google::protobuf::StringValue, ::google::protobuf::Empty>*>(handler)
|
||||
->SetMessageAllocator(allocator);
|
||||
}
|
||||
~WithCallbackMethod_Raise() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Raise(::grpc::ServerContext* /*context*/, const ::google::protobuf::StringValue* /*request*/, ::google::protobuf::Empty* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
virtual ::grpc::ServerUnaryReactor* Raise(
|
||||
::grpc::CallbackServerContext* /*context*/, const ::google::protobuf::StringValue* /*request*/, ::google::protobuf::Empty* /*response*/) { return nullptr; }
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithCallbackMethod_Version : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithCallbackMethod_Version() {
|
||||
::grpc::Service::MarkMethodCallback(1,
|
||||
new ::grpc::internal::CallbackUnaryHandler< ::google::protobuf::Empty, ::focus::VersionResponse>(
|
||||
[this](
|
||||
::grpc::CallbackServerContext* context, const ::google::protobuf::Empty* request, ::focus::VersionResponse* response) { return this->Version(context, request, response); }));}
|
||||
void SetMessageAllocatorFor_Version(
|
||||
::grpc::MessageAllocator< ::google::protobuf::Empty, ::focus::VersionResponse>* allocator) {
|
||||
::grpc::internal::MethodHandler* const handler = ::grpc::Service::GetHandler(1);
|
||||
static_cast<::grpc::internal::CallbackUnaryHandler< ::google::protobuf::Empty, ::focus::VersionResponse>*>(handler)
|
||||
->SetMessageAllocator(allocator);
|
||||
}
|
||||
~WithCallbackMethod_Version() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Version(::grpc::ServerContext* /*context*/, const ::google::protobuf::Empty* /*request*/, ::focus::VersionResponse* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
virtual ::grpc::ServerUnaryReactor* Version(
|
||||
::grpc::CallbackServerContext* /*context*/, const ::google::protobuf::Empty* /*request*/, ::focus::VersionResponse* /*response*/) { return nullptr; }
|
||||
};
|
||||
typedef WithCallbackMethod_Raise<WithCallbackMethod_Version<Service > > CallbackService;
|
||||
typedef CallbackService ExperimentalCallbackService;
|
||||
template <class BaseClass>
|
||||
class WithGenericMethod_Raise : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithGenericMethod_Raise() {
|
||||
::grpc::Service::MarkMethodGeneric(0);
|
||||
}
|
||||
~WithGenericMethod_Raise() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Raise(::grpc::ServerContext* /*context*/, const ::google::protobuf::StringValue* /*request*/, ::google::protobuf::Empty* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithGenericMethod_Version : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithGenericMethod_Version() {
|
||||
::grpc::Service::MarkMethodGeneric(1);
|
||||
}
|
||||
~WithGenericMethod_Version() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Version(::grpc::ServerContext* /*context*/, const ::google::protobuf::Empty* /*request*/, ::focus::VersionResponse* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithRawMethod_Raise : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithRawMethod_Raise() {
|
||||
::grpc::Service::MarkMethodRaw(0);
|
||||
}
|
||||
~WithRawMethod_Raise() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Raise(::grpc::ServerContext* /*context*/, const ::google::protobuf::StringValue* /*request*/, ::google::protobuf::Empty* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
void RequestRaise(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
|
||||
::grpc::Service::RequestAsyncUnary(0, context, request, response, new_call_cq, notification_cq, tag);
|
||||
}
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithRawMethod_Version : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithRawMethod_Version() {
|
||||
::grpc::Service::MarkMethodRaw(1);
|
||||
}
|
||||
~WithRawMethod_Version() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Version(::grpc::ServerContext* /*context*/, const ::google::protobuf::Empty* /*request*/, ::focus::VersionResponse* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
void RequestVersion(::grpc::ServerContext* context, ::grpc::ByteBuffer* request, ::grpc::ServerAsyncResponseWriter< ::grpc::ByteBuffer>* response, ::grpc::CompletionQueue* new_call_cq, ::grpc::ServerCompletionQueue* notification_cq, void *tag) {
|
||||
::grpc::Service::RequestAsyncUnary(1, context, request, response, new_call_cq, notification_cq, tag);
|
||||
}
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithRawCallbackMethod_Raise : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithRawCallbackMethod_Raise() {
|
||||
::grpc::Service::MarkMethodRawCallback(0,
|
||||
new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
|
||||
[this](
|
||||
::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Raise(context, request, response); }));
|
||||
}
|
||||
~WithRawCallbackMethod_Raise() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Raise(::grpc::ServerContext* /*context*/, const ::google::protobuf::StringValue* /*request*/, ::google::protobuf::Empty* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
virtual ::grpc::ServerUnaryReactor* Raise(
|
||||
::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; }
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithRawCallbackMethod_Version : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithRawCallbackMethod_Version() {
|
||||
::grpc::Service::MarkMethodRawCallback(1,
|
||||
new ::grpc::internal::CallbackUnaryHandler< ::grpc::ByteBuffer, ::grpc::ByteBuffer>(
|
||||
[this](
|
||||
::grpc::CallbackServerContext* context, const ::grpc::ByteBuffer* request, ::grpc::ByteBuffer* response) { return this->Version(context, request, response); }));
|
||||
}
|
||||
~WithRawCallbackMethod_Version() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable synchronous version of this method
|
||||
::grpc::Status Version(::grpc::ServerContext* /*context*/, const ::google::protobuf::Empty* /*request*/, ::focus::VersionResponse* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
virtual ::grpc::ServerUnaryReactor* Version(
|
||||
::grpc::CallbackServerContext* /*context*/, const ::grpc::ByteBuffer* /*request*/, ::grpc::ByteBuffer* /*response*/) { return nullptr; }
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithStreamedUnaryMethod_Raise : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithStreamedUnaryMethod_Raise() {
|
||||
::grpc::Service::MarkMethodStreamed(0,
|
||||
new ::grpc::internal::StreamedUnaryHandler<
|
||||
::google::protobuf::StringValue, ::google::protobuf::Empty>(
|
||||
[this](::grpc::ServerContext* context,
|
||||
::grpc::ServerUnaryStreamer<
|
||||
::google::protobuf::StringValue, ::google::protobuf::Empty>* streamer) {
|
||||
return this->StreamedRaise(context,
|
||||
streamer);
|
||||
}));
|
||||
}
|
||||
~WithStreamedUnaryMethod_Raise() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable regular version of this method
|
||||
::grpc::Status Raise(::grpc::ServerContext* /*context*/, const ::google::protobuf::StringValue* /*request*/, ::google::protobuf::Empty* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
// replace default version of method with streamed unary
|
||||
virtual ::grpc::Status StreamedRaise(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::google::protobuf::StringValue,::google::protobuf::Empty>* server_unary_streamer) = 0;
|
||||
};
|
||||
template <class BaseClass>
|
||||
class WithStreamedUnaryMethod_Version : public BaseClass {
|
||||
private:
|
||||
void BaseClassMustBeDerivedFromService(const Service* /*service*/) {}
|
||||
public:
|
||||
WithStreamedUnaryMethod_Version() {
|
||||
::grpc::Service::MarkMethodStreamed(1,
|
||||
new ::grpc::internal::StreamedUnaryHandler<
|
||||
::google::protobuf::Empty, ::focus::VersionResponse>(
|
||||
[this](::grpc::ServerContext* context,
|
||||
::grpc::ServerUnaryStreamer<
|
||||
::google::protobuf::Empty, ::focus::VersionResponse>* streamer) {
|
||||
return this->StreamedVersion(context,
|
||||
streamer);
|
||||
}));
|
||||
}
|
||||
~WithStreamedUnaryMethod_Version() override {
|
||||
BaseClassMustBeDerivedFromService(this);
|
||||
}
|
||||
// disable regular version of this method
|
||||
::grpc::Status Version(::grpc::ServerContext* /*context*/, const ::google::protobuf::Empty* /*request*/, ::focus::VersionResponse* /*response*/) override {
|
||||
abort();
|
||||
return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, "");
|
||||
}
|
||||
// replace default version of method with streamed unary
|
||||
virtual ::grpc::Status StreamedVersion(::grpc::ServerContext* context, ::grpc::ServerUnaryStreamer< ::google::protobuf::Empty,::focus::VersionResponse>* server_unary_streamer) = 0;
|
||||
};
|
||||
typedef WithStreamedUnaryMethod_Raise<WithStreamedUnaryMethod_Version<Service > > StreamedUnaryService;
|
||||
typedef Service SplitStreamedService;
|
||||
typedef WithStreamedUnaryMethod_Raise<WithStreamedUnaryMethod_Version<Service > > StreamedService;
|
||||
};
|
||||
|
||||
} // namespace focus
|
||||
|
||||
|
||||
#endif // GRPC_focus_2eproto__INCLUDED
|
|
@ -1,303 +0,0 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
// source: focus.proto
|
||||
|
||||
#include "focus.pb.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include <google/protobuf/io/coded_stream.h>
|
||||
#include <google/protobuf/extension_set.h>
|
||||
#include <google/protobuf/wire_format_lite.h>
|
||||
#include <google/protobuf/descriptor.h>
|
||||
#include <google/protobuf/generated_message_reflection.h>
|
||||
#include <google/protobuf/reflection_ops.h>
|
||||
#include <google/protobuf/wire_format.h>
|
||||
// @@protoc_insertion_point(includes)
|
||||
#include <google/protobuf/port_def.inc>
|
||||
|
||||
PROTOBUF_PRAGMA_INIT_SEG
|
||||
|
||||
namespace _pb = ::PROTOBUF_NAMESPACE_ID;
|
||||
namespace _pbi = _pb::internal;
|
||||
|
||||
namespace focus {
|
||||
PROTOBUF_CONSTEXPR VersionResponse::VersionResponse(
|
||||
::_pbi::ConstantInitialized): _impl_{
|
||||
/*decltype(_impl_.version_)*/{&::_pbi::fixed_address_empty_string, ::_pbi::ConstantInitialized{}}
|
||||
, /*decltype(_impl_._cached_size_)*/{}} {}
|
||||
struct VersionResponseDefaultTypeInternal {
|
||||
PROTOBUF_CONSTEXPR VersionResponseDefaultTypeInternal()
|
||||
: _instance(::_pbi::ConstantInitialized{}) {}
|
||||
~VersionResponseDefaultTypeInternal() {}
|
||||
union {
|
||||
VersionResponse _instance;
|
||||
};
|
||||
};
|
||||
PROTOBUF_ATTRIBUTE_NO_DESTROY PROTOBUF_CONSTINIT PROTOBUF_ATTRIBUTE_INIT_PRIORITY1 VersionResponseDefaultTypeInternal _VersionResponse_default_instance_;
|
||||
} // namespace focus
|
||||
static ::_pb::Metadata file_level_metadata_focus_2eproto[1];
|
||||
static constexpr ::_pb::EnumDescriptor const** file_level_enum_descriptors_focus_2eproto = nullptr;
|
||||
static constexpr ::_pb::ServiceDescriptor const** file_level_service_descriptors_focus_2eproto = nullptr;
|
||||
|
||||
const uint32_t TableStruct_focus_2eproto::offsets[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
|
||||
~0u, // no _has_bits_
|
||||
PROTOBUF_FIELD_OFFSET(::focus::VersionResponse, _internal_metadata_),
|
||||
~0u, // no _extensions_
|
||||
~0u, // no _oneof_case_
|
||||
~0u, // no _weak_field_map_
|
||||
~0u, // no _inlined_string_donated_
|
||||
PROTOBUF_FIELD_OFFSET(::focus::VersionResponse, _impl_.version_),
|
||||
};
|
||||
static const ::_pbi::MigrationSchema schemas[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) = {
|
||||
{ 0, -1, -1, sizeof(::focus::VersionResponse)},
|
||||
};
|
||||
|
||||
static const ::_pb::Message* const file_default_instances[] = {
|
||||
&::focus::_VersionResponse_default_instance_._instance,
|
||||
};
|
||||
|
||||
const char descriptor_table_protodef_focus_2eproto[] PROTOBUF_SECTION_VARIABLE(protodesc_cold) =
|
||||
"\n\013focus.proto\022\005focus\032\033google/protobuf/em"
|
||||
"pty.proto\032\036google/protobuf/wrappers.prot"
|
||||
"o\"\"\n\017VersionResponse\022\017\n\007version\030\001 \001(\t2\201\001"
|
||||
"\n\005Focus\022=\n\005Raise\022\034.google.protobuf.Strin"
|
||||
"gValue\032\026.google.protobuf.Empty\0229\n\007Versio"
|
||||
"n\022\026.google.protobuf.Empty\032\026.focus.Versio"
|
||||
"nResponseB=Z;github.com/ProtonMail/proto"
|
||||
"n-bridge/v3/internal/focus/protob\006proto3"
|
||||
;
|
||||
static const ::_pbi::DescriptorTable* const descriptor_table_focus_2eproto_deps[2] = {
|
||||
&::descriptor_table_google_2fprotobuf_2fempty_2eproto,
|
||||
&::descriptor_table_google_2fprotobuf_2fwrappers_2eproto,
|
||||
};
|
||||
static ::_pbi::once_flag descriptor_table_focus_2eproto_once;
|
||||
const ::_pbi::DescriptorTable descriptor_table_focus_2eproto = {
|
||||
false, false, 320, descriptor_table_protodef_focus_2eproto,
|
||||
"focus.proto",
|
||||
&descriptor_table_focus_2eproto_once, descriptor_table_focus_2eproto_deps, 2, 1,
|
||||
schemas, file_default_instances, TableStruct_focus_2eproto::offsets,
|
||||
file_level_metadata_focus_2eproto, file_level_enum_descriptors_focus_2eproto,
|
||||
file_level_service_descriptors_focus_2eproto,
|
||||
};
|
||||
PROTOBUF_ATTRIBUTE_WEAK const ::_pbi::DescriptorTable* descriptor_table_focus_2eproto_getter() {
|
||||
return &descriptor_table_focus_2eproto;
|
||||
}
|
||||
|
||||
// Force running AddDescriptors() at dynamic initialization time.
|
||||
PROTOBUF_ATTRIBUTE_INIT_PRIORITY2 static ::_pbi::AddDescriptorsRunner dynamic_init_dummy_focus_2eproto(&descriptor_table_focus_2eproto);
|
||||
namespace focus {
|
||||
|
||||
// ===================================================================
|
||||
|
||||
class VersionResponse::_Internal {
|
||||
public:
|
||||
};
|
||||
|
||||
VersionResponse::VersionResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
|
||||
bool is_message_owned)
|
||||
: ::PROTOBUF_NAMESPACE_ID::Message(arena, is_message_owned) {
|
||||
SharedCtor(arena, is_message_owned);
|
||||
// @@protoc_insertion_point(arena_constructor:focus.VersionResponse)
|
||||
}
|
||||
VersionResponse::VersionResponse(const VersionResponse& from)
|
||||
: ::PROTOBUF_NAMESPACE_ID::Message() {
|
||||
VersionResponse* const _this = this; (void)_this;
|
||||
new (&_impl_) Impl_{
|
||||
decltype(_impl_.version_){}
|
||||
, /*decltype(_impl_._cached_size_)*/{}};
|
||||
|
||||
_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
|
||||
_impl_.version_.InitDefault();
|
||||
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
_impl_.version_.Set("", GetArenaForAllocation());
|
||||
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
if (!from._internal_version().empty()) {
|
||||
_this->_impl_.version_.Set(from._internal_version(),
|
||||
_this->GetArenaForAllocation());
|
||||
}
|
||||
// @@protoc_insertion_point(copy_constructor:focus.VersionResponse)
|
||||
}
|
||||
|
||||
inline void VersionResponse::SharedCtor(
|
||||
::_pb::Arena* arena, bool is_message_owned) {
|
||||
(void)arena;
|
||||
(void)is_message_owned;
|
||||
new (&_impl_) Impl_{
|
||||
decltype(_impl_.version_){}
|
||||
, /*decltype(_impl_._cached_size_)*/{}
|
||||
};
|
||||
_impl_.version_.InitDefault();
|
||||
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
_impl_.version_.Set("", GetArenaForAllocation());
|
||||
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
}
|
||||
|
||||
VersionResponse::~VersionResponse() {
|
||||
// @@protoc_insertion_point(destructor:focus.VersionResponse)
|
||||
if (auto *arena = _internal_metadata_.DeleteReturnArena<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>()) {
|
||||
(void)arena;
|
||||
return;
|
||||
}
|
||||
SharedDtor();
|
||||
}
|
||||
|
||||
inline void VersionResponse::SharedDtor() {
|
||||
GOOGLE_DCHECK(GetArenaForAllocation() == nullptr);
|
||||
_impl_.version_.Destroy();
|
||||
}
|
||||
|
||||
void VersionResponse::SetCachedSize(int size) const {
|
||||
_impl_._cached_size_.Set(size);
|
||||
}
|
||||
|
||||
void VersionResponse::Clear() {
|
||||
// @@protoc_insertion_point(message_clear_start:focus.VersionResponse)
|
||||
uint32_t cached_has_bits = 0;
|
||||
// Prevent compiler warnings about cached_has_bits being unused
|
||||
(void) cached_has_bits;
|
||||
|
||||
_impl_.version_.ClearToEmpty();
|
||||
_internal_metadata_.Clear<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>();
|
||||
}
|
||||
|
||||
const char* VersionResponse::_InternalParse(const char* ptr, ::_pbi::ParseContext* ctx) {
|
||||
#define CHK_(x) if (PROTOBUF_PREDICT_FALSE(!(x))) goto failure
|
||||
while (!ctx->Done(&ptr)) {
|
||||
uint32_t tag;
|
||||
ptr = ::_pbi::ReadTag(ptr, &tag);
|
||||
switch (tag >> 3) {
|
||||
// string version = 1;
|
||||
case 1:
|
||||
if (PROTOBUF_PREDICT_TRUE(static_cast<uint8_t>(tag) == 10)) {
|
||||
auto str = _internal_mutable_version();
|
||||
ptr = ::_pbi::InlineGreedyStringParser(str, ptr, ctx);
|
||||
CHK_(ptr);
|
||||
CHK_(::_pbi::VerifyUTF8(str, "focus.VersionResponse.version"));
|
||||
} else
|
||||
goto handle_unusual;
|
||||
continue;
|
||||
default:
|
||||
goto handle_unusual;
|
||||
} // switch
|
||||
handle_unusual:
|
||||
if ((tag == 0) || ((tag & 7) == 4)) {
|
||||
CHK_(ptr);
|
||||
ctx->SetLastTag(tag);
|
||||
goto message_done;
|
||||
}
|
||||
ptr = UnknownFieldParse(
|
||||
tag,
|
||||
_internal_metadata_.mutable_unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(),
|
||||
ptr, ctx);
|
||||
CHK_(ptr != nullptr);
|
||||
} // while
|
||||
message_done:
|
||||
return ptr;
|
||||
failure:
|
||||
ptr = nullptr;
|
||||
goto message_done;
|
||||
#undef CHK_
|
||||
}
|
||||
|
||||
uint8_t* VersionResponse::_InternalSerialize(
|
||||
uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const {
|
||||
// @@protoc_insertion_point(serialize_to_array_start:focus.VersionResponse)
|
||||
uint32_t cached_has_bits = 0;
|
||||
(void) cached_has_bits;
|
||||
|
||||
// string version = 1;
|
||||
if (!this->_internal_version().empty()) {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::VerifyUtf8String(
|
||||
this->_internal_version().data(), static_cast<int>(this->_internal_version().length()),
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::SERIALIZE,
|
||||
"focus.VersionResponse.version");
|
||||
target = stream->WriteStringMaybeAliased(
|
||||
1, this->_internal_version(), target);
|
||||
}
|
||||
|
||||
if (PROTOBUF_PREDICT_FALSE(_internal_metadata_.have_unknown_fields())) {
|
||||
target = ::_pbi::WireFormat::InternalSerializeUnknownFieldsToArray(
|
||||
_internal_metadata_.unknown_fields<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(::PROTOBUF_NAMESPACE_ID::UnknownFieldSet::default_instance), target, stream);
|
||||
}
|
||||
// @@protoc_insertion_point(serialize_to_array_end:focus.VersionResponse)
|
||||
return target;
|
||||
}
|
||||
|
||||
size_t VersionResponse::ByteSizeLong() const {
|
||||
// @@protoc_insertion_point(message_byte_size_start:focus.VersionResponse)
|
||||
size_t total_size = 0;
|
||||
|
||||
uint32_t cached_has_bits = 0;
|
||||
// Prevent compiler warnings about cached_has_bits being unused
|
||||
(void) cached_has_bits;
|
||||
|
||||
// string version = 1;
|
||||
if (!this->_internal_version().empty()) {
|
||||
total_size += 1 +
|
||||
::PROTOBUF_NAMESPACE_ID::internal::WireFormatLite::StringSize(
|
||||
this->_internal_version());
|
||||
}
|
||||
|
||||
return MaybeComputeUnknownFieldsSize(total_size, &_impl_._cached_size_);
|
||||
}
|
||||
|
||||
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData VersionResponse::_class_data_ = {
|
||||
::PROTOBUF_NAMESPACE_ID::Message::CopyWithSourceCheck,
|
||||
VersionResponse::MergeImpl
|
||||
};
|
||||
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*VersionResponse::GetClassData() const { return &_class_data_; }
|
||||
|
||||
|
||||
void VersionResponse::MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg) {
|
||||
auto* const _this = static_cast<VersionResponse*>(&to_msg);
|
||||
auto& from = static_cast<const VersionResponse&>(from_msg);
|
||||
// @@protoc_insertion_point(class_specific_merge_from_start:focus.VersionResponse)
|
||||
GOOGLE_DCHECK_NE(&from, _this);
|
||||
uint32_t cached_has_bits = 0;
|
||||
(void) cached_has_bits;
|
||||
|
||||
if (!from._internal_version().empty()) {
|
||||
_this->_internal_set_version(from._internal_version());
|
||||
}
|
||||
_this->_internal_metadata_.MergeFrom<::PROTOBUF_NAMESPACE_ID::UnknownFieldSet>(from._internal_metadata_);
|
||||
}
|
||||
|
||||
void VersionResponse::CopyFrom(const VersionResponse& from) {
|
||||
// @@protoc_insertion_point(class_specific_copy_from_start:focus.VersionResponse)
|
||||
if (&from == this) return;
|
||||
Clear();
|
||||
MergeFrom(from);
|
||||
}
|
||||
|
||||
bool VersionResponse::IsInitialized() const {
|
||||
return true;
|
||||
}
|
||||
|
||||
void VersionResponse::InternalSwap(VersionResponse* other) {
|
||||
using std::swap;
|
||||
auto* lhs_arena = GetArenaForAllocation();
|
||||
auto* rhs_arena = other->GetArenaForAllocation();
|
||||
_internal_metadata_.InternalSwap(&other->_internal_metadata_);
|
||||
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr::InternalSwap(
|
||||
&_impl_.version_, lhs_arena,
|
||||
&other->_impl_.version_, rhs_arena
|
||||
);
|
||||
}
|
||||
|
||||
::PROTOBUF_NAMESPACE_ID::Metadata VersionResponse::GetMetadata() const {
|
||||
return ::_pbi::AssignDescriptors(
|
||||
&descriptor_table_focus_2eproto_getter, &descriptor_table_focus_2eproto_once,
|
||||
file_level_metadata_focus_2eproto[0]);
|
||||
}
|
||||
|
||||
// @@protoc_insertion_point(namespace_scope)
|
||||
} // namespace focus
|
||||
PROTOBUF_NAMESPACE_OPEN
|
||||
template<> PROTOBUF_NOINLINE ::focus::VersionResponse*
|
||||
Arena::CreateMaybeMessage< ::focus::VersionResponse >(Arena* arena) {
|
||||
return Arena::CreateMessageInternal< ::focus::VersionResponse >(arena);
|
||||
}
|
||||
PROTOBUF_NAMESPACE_CLOSE
|
||||
|
||||
// @@protoc_insertion_point(global_scope)
|
||||
#include <google/protobuf/port_undef.inc>
|
|
@ -1,284 +0,0 @@
|
|||
// Generated by the protocol buffer compiler. DO NOT EDIT!
|
||||
// source: focus.proto
|
||||
|
||||
#ifndef GOOGLE_PROTOBUF_INCLUDED_focus_2eproto
|
||||
#define GOOGLE_PROTOBUF_INCLUDED_focus_2eproto
|
||||
|
||||
#include <limits>
|
||||
#include <string>
|
||||
|
||||
#include <google/protobuf/port_def.inc>
|
||||
#if PROTOBUF_VERSION < 3021000
|
||||
#error This file was generated by a newer version of protoc which is
|
||||
#error incompatible with your Protocol Buffer headers. Please update
|
||||
#error your headers.
|
||||
#endif
|
||||
#if 3021012 < PROTOBUF_MIN_PROTOC_VERSION
|
||||
#error This file was generated by an older version of protoc which is
|
||||
#error incompatible with your Protocol Buffer headers. Please
|
||||
#error regenerate this file with a newer version of protoc.
|
||||
#endif
|
||||
|
||||
#include <google/protobuf/port_undef.inc>
|
||||
#include <google/protobuf/io/coded_stream.h>
|
||||
#include <google/protobuf/arena.h>
|
||||
#include <google/protobuf/arenastring.h>
|
||||
#include <google/protobuf/generated_message_util.h>
|
||||
#include <google/protobuf/metadata_lite.h>
|
||||
#include <google/protobuf/generated_message_reflection.h>
|
||||
#include <google/protobuf/message.h>
|
||||
#include <google/protobuf/repeated_field.h> // IWYU pragma: export
|
||||
#include <google/protobuf/extension_set.h> // IWYU pragma: export
|
||||
#include <google/protobuf/unknown_field_set.h>
|
||||
#include <google/protobuf/empty.pb.h>
|
||||
#include <google/protobuf/wrappers.pb.h>
|
||||
// @@protoc_insertion_point(includes)
|
||||
#include <google/protobuf/port_def.inc>
|
||||
#define PROTOBUF_INTERNAL_EXPORT_focus_2eproto
|
||||
PROTOBUF_NAMESPACE_OPEN
|
||||
namespace internal {
|
||||
class AnyMetadata;
|
||||
} // namespace internal
|
||||
PROTOBUF_NAMESPACE_CLOSE
|
||||
|
||||
// Internal implementation detail -- do not use these members.
|
||||
struct TableStruct_focus_2eproto {
|
||||
static const uint32_t offsets[];
|
||||
};
|
||||
extern const ::PROTOBUF_NAMESPACE_ID::internal::DescriptorTable descriptor_table_focus_2eproto;
|
||||
namespace focus {
|
||||
class VersionResponse;
|
||||
struct VersionResponseDefaultTypeInternal;
|
||||
extern VersionResponseDefaultTypeInternal _VersionResponse_default_instance_;
|
||||
} // namespace focus
|
||||
PROTOBUF_NAMESPACE_OPEN
|
||||
template<> ::focus::VersionResponse* Arena::CreateMaybeMessage<::focus::VersionResponse>(Arena*);
|
||||
PROTOBUF_NAMESPACE_CLOSE
|
||||
namespace focus {
|
||||
|
||||
// ===================================================================
|
||||
|
||||
class VersionResponse final :
|
||||
public ::PROTOBUF_NAMESPACE_ID::Message /* @@protoc_insertion_point(class_definition:focus.VersionResponse) */ {
|
||||
public:
|
||||
inline VersionResponse() : VersionResponse(nullptr) {}
|
||||
~VersionResponse() override;
|
||||
explicit PROTOBUF_CONSTEXPR VersionResponse(::PROTOBUF_NAMESPACE_ID::internal::ConstantInitialized);
|
||||
|
||||
VersionResponse(const VersionResponse& from);
|
||||
VersionResponse(VersionResponse&& from) noexcept
|
||||
: VersionResponse() {
|
||||
*this = ::std::move(from);
|
||||
}
|
||||
|
||||
inline VersionResponse& operator=(const VersionResponse& from) {
|
||||
CopyFrom(from);
|
||||
return *this;
|
||||
}
|
||||
inline VersionResponse& operator=(VersionResponse&& from) noexcept {
|
||||
if (this == &from) return *this;
|
||||
if (GetOwningArena() == from.GetOwningArena()
|
||||
#ifdef PROTOBUF_FORCE_COPY_IN_MOVE
|
||||
&& GetOwningArena() != nullptr
|
||||
#endif // !PROTOBUF_FORCE_COPY_IN_MOVE
|
||||
) {
|
||||
InternalSwap(&from);
|
||||
} else {
|
||||
CopyFrom(from);
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* descriptor() {
|
||||
return GetDescriptor();
|
||||
}
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Descriptor* GetDescriptor() {
|
||||
return default_instance().GetMetadata().descriptor;
|
||||
}
|
||||
static const ::PROTOBUF_NAMESPACE_ID::Reflection* GetReflection() {
|
||||
return default_instance().GetMetadata().reflection;
|
||||
}
|
||||
static const VersionResponse& default_instance() {
|
||||
return *internal_default_instance();
|
||||
}
|
||||
static inline const VersionResponse* internal_default_instance() {
|
||||
return reinterpret_cast<const VersionResponse*>(
|
||||
&_VersionResponse_default_instance_);
|
||||
}
|
||||
static constexpr int kIndexInFileMessages =
|
||||
0;
|
||||
|
||||
friend void swap(VersionResponse& a, VersionResponse& b) {
|
||||
a.Swap(&b);
|
||||
}
|
||||
inline void Swap(VersionResponse* other) {
|
||||
if (other == this) return;
|
||||
#ifdef PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
if (GetOwningArena() != nullptr &&
|
||||
GetOwningArena() == other->GetOwningArena()) {
|
||||
#else // PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
if (GetOwningArena() == other->GetOwningArena()) {
|
||||
#endif // !PROTOBUF_FORCE_COPY_IN_SWAP
|
||||
InternalSwap(other);
|
||||
} else {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::GenericSwap(this, other);
|
||||
}
|
||||
}
|
||||
void UnsafeArenaSwap(VersionResponse* other) {
|
||||
if (other == this) return;
|
||||
GOOGLE_DCHECK(GetOwningArena() == other->GetOwningArena());
|
||||
InternalSwap(other);
|
||||
}
|
||||
|
||||
// implements Message ----------------------------------------------
|
||||
|
||||
VersionResponse* New(::PROTOBUF_NAMESPACE_ID::Arena* arena = nullptr) const final {
|
||||
return CreateMaybeMessage<VersionResponse>(arena);
|
||||
}
|
||||
using ::PROTOBUF_NAMESPACE_ID::Message::CopyFrom;
|
||||
void CopyFrom(const VersionResponse& from);
|
||||
using ::PROTOBUF_NAMESPACE_ID::Message::MergeFrom;
|
||||
void MergeFrom( const VersionResponse& from) {
|
||||
VersionResponse::MergeImpl(*this, from);
|
||||
}
|
||||
private:
|
||||
static void MergeImpl(::PROTOBUF_NAMESPACE_ID::Message& to_msg, const ::PROTOBUF_NAMESPACE_ID::Message& from_msg);
|
||||
public:
|
||||
PROTOBUF_ATTRIBUTE_REINITIALIZES void Clear() final;
|
||||
bool IsInitialized() const final;
|
||||
|
||||
size_t ByteSizeLong() const final;
|
||||
const char* _InternalParse(const char* ptr, ::PROTOBUF_NAMESPACE_ID::internal::ParseContext* ctx) final;
|
||||
uint8_t* _InternalSerialize(
|
||||
uint8_t* target, ::PROTOBUF_NAMESPACE_ID::io::EpsCopyOutputStream* stream) const final;
|
||||
int GetCachedSize() const final { return _impl_._cached_size_.Get(); }
|
||||
|
||||
private:
|
||||
void SharedCtor(::PROTOBUF_NAMESPACE_ID::Arena* arena, bool is_message_owned);
|
||||
void SharedDtor();
|
||||
void SetCachedSize(int size) const final;
|
||||
void InternalSwap(VersionResponse* other);
|
||||
|
||||
private:
|
||||
friend class ::PROTOBUF_NAMESPACE_ID::internal::AnyMetadata;
|
||||
static ::PROTOBUF_NAMESPACE_ID::StringPiece FullMessageName() {
|
||||
return "focus.VersionResponse";
|
||||
}
|
||||
protected:
|
||||
explicit VersionResponse(::PROTOBUF_NAMESPACE_ID::Arena* arena,
|
||||
bool is_message_owned = false);
|
||||
public:
|
||||
|
||||
static const ClassData _class_data_;
|
||||
const ::PROTOBUF_NAMESPACE_ID::Message::ClassData*GetClassData() const final;
|
||||
|
||||
::PROTOBUF_NAMESPACE_ID::Metadata GetMetadata() const final;
|
||||
|
||||
// nested types ----------------------------------------------------
|
||||
|
||||
// accessors -------------------------------------------------------
|
||||
|
||||
enum : int {
|
||||
kVersionFieldNumber = 1,
|
||||
};
|
||||
// string version = 1;
|
||||
void clear_version();
|
||||
const std::string& version() const;
|
||||
template <typename ArgT0 = const std::string&, typename... ArgT>
|
||||
void set_version(ArgT0&& arg0, ArgT... args);
|
||||
std::string* mutable_version();
|
||||
PROTOBUF_NODISCARD std::string* release_version();
|
||||
void set_allocated_version(std::string* version);
|
||||
private:
|
||||
const std::string& _internal_version() const;
|
||||
inline PROTOBUF_ALWAYS_INLINE void _internal_set_version(const std::string& value);
|
||||
std::string* _internal_mutable_version();
|
||||
public:
|
||||
|
||||
// @@protoc_insertion_point(class_scope:focus.VersionResponse)
|
||||
private:
|
||||
class _Internal;
|
||||
|
||||
template <typename T> friend class ::PROTOBUF_NAMESPACE_ID::Arena::InternalHelper;
|
||||
typedef void InternalArenaConstructable_;
|
||||
typedef void DestructorSkippable_;
|
||||
struct Impl_ {
|
||||
::PROTOBUF_NAMESPACE_ID::internal::ArenaStringPtr version_;
|
||||
mutable ::PROTOBUF_NAMESPACE_ID::internal::CachedSize _cached_size_;
|
||||
};
|
||||
union { Impl_ _impl_; };
|
||||
friend struct ::TableStruct_focus_2eproto;
|
||||
};
|
||||
// ===================================================================
|
||||
|
||||
|
||||
// ===================================================================
|
||||
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic push
|
||||
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
|
||||
#endif // __GNUC__
|
||||
// VersionResponse
|
||||
|
||||
// string version = 1;
|
||||
inline void VersionResponse::clear_version() {
|
||||
_impl_.version_.ClearToEmpty();
|
||||
}
|
||||
inline const std::string& VersionResponse::version() const {
|
||||
// @@protoc_insertion_point(field_get:focus.VersionResponse.version)
|
||||
return _internal_version();
|
||||
}
|
||||
template <typename ArgT0, typename... ArgT>
|
||||
inline PROTOBUF_ALWAYS_INLINE
|
||||
void VersionResponse::set_version(ArgT0&& arg0, ArgT... args) {
|
||||
|
||||
_impl_.version_.Set(static_cast<ArgT0 &&>(arg0), args..., GetArenaForAllocation());
|
||||
// @@protoc_insertion_point(field_set:focus.VersionResponse.version)
|
||||
}
|
||||
inline std::string* VersionResponse::mutable_version() {
|
||||
std::string* _s = _internal_mutable_version();
|
||||
// @@protoc_insertion_point(field_mutable:focus.VersionResponse.version)
|
||||
return _s;
|
||||
}
|
||||
inline const std::string& VersionResponse::_internal_version() const {
|
||||
return _impl_.version_.Get();
|
||||
}
|
||||
inline void VersionResponse::_internal_set_version(const std::string& value) {
|
||||
|
||||
_impl_.version_.Set(value, GetArenaForAllocation());
|
||||
}
|
||||
inline std::string* VersionResponse::_internal_mutable_version() {
|
||||
|
||||
return _impl_.version_.Mutable(GetArenaForAllocation());
|
||||
}
|
||||
inline std::string* VersionResponse::release_version() {
|
||||
// @@protoc_insertion_point(field_release:focus.VersionResponse.version)
|
||||
return _impl_.version_.Release();
|
||||
}
|
||||
inline void VersionResponse::set_allocated_version(std::string* version) {
|
||||
if (version != nullptr) {
|
||||
|
||||
} else {
|
||||
|
||||
}
|
||||
_impl_.version_.SetAllocated(version, GetArenaForAllocation());
|
||||
#ifdef PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
if (_impl_.version_.IsDefault()) {
|
||||
_impl_.version_.Set("", GetArenaForAllocation());
|
||||
}
|
||||
#endif // PROTOBUF_FORCE_COPY_DEFAULT_STRING
|
||||
// @@protoc_insertion_point(field_set_allocated:focus.VersionResponse.version)
|
||||
}
|
||||
|
||||
#ifdef __GNUC__
|
||||
#pragma GCC diagnostic pop
|
||||
#endif // __GNUC__
|
||||
|
||||
// @@protoc_insertion_point(namespace_scope)
|
||||
|
||||
} // namespace focus
|
||||
|
||||
// @@protoc_insertion_point(global_scope)
|
||||
|
||||
#include <google/protobuf/port_undef.inc>
|
||||
#endif // GOOGLE_PROTOBUF_INCLUDED_GOOGLE_PROTOBUF_INCLUDED_focus_2eproto
|
|
@ -57,11 +57,13 @@ void GRPCClient::removeServiceConfigFile(QString const &configDir) {
|
|||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] sessionID The sessionID.
|
||||
/// \param[in] timeoutMs The timeout in milliseconds
|
||||
/// \param[in] serverProcess An optional server process to monitor. If the process it, no need and retry, as connexion cannot be established. Ignored if null.
|
||||
/// \return The service config.
|
||||
//****************************************************************************************************************************************************
|
||||
GRPCConfig GRPCClient::waitAndRetrieveServiceConfig(QString const &configDir, qint64 timeoutMs, ProcessMonitor *serverProcess) {
|
||||
GRPCConfig GRPCClient::waitAndRetrieveServiceConfig(QString const & sessionID, QString const &configDir, qint64 timeoutMs,
|
||||
ProcessMonitor *serverProcess) {
|
||||
QString const path = grpcServerConfigPath(configDir);
|
||||
QFile file(path);
|
||||
|
||||
|
@ -71,7 +73,7 @@ GRPCConfig GRPCClient::waitAndRetrieveServiceConfig(QString const &configDir, qi
|
|||
while (true) {
|
||||
if (serverProcess && serverProcess->getStatus().ended) {
|
||||
throw Exception("Bridge application exited before providing a gRPC service configuration file.", QString(), __FUNCTION__,
|
||||
tailOfLatestBridgeLog());
|
||||
tailOfLatestBridgeLog(sessionID));
|
||||
}
|
||||
|
||||
if (file.exists()) {
|
||||
|
@ -85,7 +87,7 @@ GRPCConfig GRPCClient::waitAndRetrieveServiceConfig(QString const &configDir, qi
|
|||
}
|
||||
|
||||
if (!found) {
|
||||
throw Exception("Server did not provide gRPC service configuration in time.", QString(), __FUNCTION__, tailOfLatestBridgeLog());
|
||||
throw Exception("Server did not provide gRPC service configuration in time.", QString(), __FUNCTION__, tailOfLatestBridgeLog(sessionID));
|
||||
}
|
||||
|
||||
GRPCConfig sc;
|
||||
|
@ -114,10 +116,12 @@ void GRPCClient::setLog(Log *log) {
|
|||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] sessionID The sessionID.
|
||||
/// \param[in] configDir The configuration directory
|
||||
/// \param[in] serverProcess An optional server process to monitor. If the process it, no need and retry, as connexion cannot be established. Ignored if null.
|
||||
/// \return true iff the connection was successful.
|
||||
//****************************************************************************************************************************************************
|
||||
void GRPCClient::connectToServer(QString const &configDir, GRPCConfig const &config, ProcessMonitor *serverProcess) {
|
||||
void GRPCClient::connectToServer(QString const &sessionID, QString const &configDir, GRPCConfig const &config, ProcessMonitor *serverProcess) {
|
||||
try {
|
||||
serverToken_ = config.token.toStdString();
|
||||
QString address;
|
||||
|
@ -147,7 +151,7 @@ void GRPCClient::connectToServer(QString const &configDir, GRPCConfig const &con
|
|||
while (true) {
|
||||
if (serverProcess && serverProcess->getStatus().ended) {
|
||||
throw Exception("Bridge application ended before gRPC connexion could be established.", QString(), __FUNCTION__,
|
||||
tailOfLatestBridgeLog());
|
||||
tailOfLatestBridgeLog(sessionID));
|
||||
}
|
||||
|
||||
this->logInfo(QString("Connection to gRPC server at %1. attempt #%2").arg(address).arg(++i));
|
||||
|
@ -158,7 +162,7 @@ void GRPCClient::connectToServer(QString const &configDir, GRPCConfig const &con
|
|||
|
||||
if (QDateTime::currentDateTime() > giveUpTime) {
|
||||
throw Exception("Connection to the gRPC server failed because of a timeout.", QString(), __FUNCTION__,
|
||||
tailOfLatestBridgeLog());
|
||||
tailOfLatestBridgeLog(sessionID));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -49,7 +49,8 @@ class GRPCClient : public QObject {
|
|||
Q_OBJECT
|
||||
public: // static member functions
|
||||
static void removeServiceConfigFile(QString const &configDir); ///< Delete the service config file.
|
||||
static GRPCConfig waitAndRetrieveServiceConfig(QString const &configDir, qint64 timeoutMs, class ProcessMonitor *serverProcess); ///< Wait and retrieve the service configuration.
|
||||
static GRPCConfig waitAndRetrieveServiceConfig(QString const &sessionID, QString const &configDir, qint64 timeoutMs,
|
||||
class ProcessMonitor *serverProcess); ///< Wait and retrieve the service configuration.
|
||||
|
||||
public: // member functions.
|
||||
GRPCClient() = default; ///< Default constructor.
|
||||
|
@ -59,7 +60,7 @@ public: // member functions.
|
|||
GRPCClient &operator=(GRPCClient const &) = delete; ///< Disabled assignment operator.
|
||||
GRPCClient &operator=(GRPCClient &&) = delete; ///< Disabled move assignment operator.
|
||||
void setLog(Log *log); ///< Set the log for the client.
|
||||
void connectToServer(QString const &configDir, GRPCConfig const &config, class ProcessMonitor *serverProcess); ///< Establish connection to the gRPC server.
|
||||
void connectToServer(QString const &sessionID, QString const &configDir, GRPCConfig const &config, class ProcessMonitor *serverProcess); ///< Establish connection to the gRPC server.
|
||||
bool isConnected() const; ///< Check whether the gRPC client is connected to the server.
|
||||
|
||||
grpc::Status checkTokens(QString const &clientConfigPath, QString &outReturnedClientToken); ///< Performs a token check.
|
||||
|
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -35,34 +35,29 @@ QString userLogsDir() {
|
|||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \brief Return the path of the latest bridge log.
|
||||
///
|
||||
/// \param[in] sessionID The sessionID.
|
||||
/// \return The path of the latest bridge log file.
|
||||
/// \return An empty string if no bridge log file was found.
|
||||
//****************************************************************************************************************************************************
|
||||
QString latestBridgeLogPath() {
|
||||
QString latestBridgeLogPath(QString const &sessionID) {
|
||||
QDir const logsDir(userLogsDir());
|
||||
if (logsDir.isEmpty()) {
|
||||
return QString();
|
||||
}
|
||||
|
||||
QFileInfoList files = logsDir.entryInfoList({ "v*.log" }, QDir::Files); // could do sorting, but only by last modification time. we want to sort by creation time.
|
||||
if (files.isEmpty()) {
|
||||
return QString();
|
||||
}
|
||||
|
||||
std::sort(files.begin(), files.end(), [](QFileInfo const &lhs, QFileInfo const &rhs) -> bool {
|
||||
return lhs.birthTime() < rhs.birthTime();
|
||||
});
|
||||
return files.back().absoluteFilePath();
|
||||
QFileInfoList const files = logsDir.entryInfoList({ sessionID + "_bri_*.log" }, QDir::Files, QDir::Name);
|
||||
return files.isEmpty() ? QString() : files.back().absoluteFilePath();
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// Return the maxSize last bytes of the latest bridge log.
|
||||
//****************************************************************************************************************************************************
|
||||
QByteArray tailOfLatestBridgeLog() {
|
||||
QString path = latestBridgeLogPath();
|
||||
QByteArray tailOfLatestBridgeLog(QString const &sessionID) {
|
||||
QString path = latestBridgeLogPath(sessionID);
|
||||
if (path.isEmpty()) {
|
||||
return QByteArray();
|
||||
return QString("We could not find a bridge log file for the current session.").toLocal8Bit();
|
||||
}
|
||||
|
||||
QFile file(path);
|
||||
|
|
|
@ -24,7 +24,7 @@ namespace bridgepp {
|
|||
|
||||
|
||||
QString userLogsDir(); ///< Return the path of the user logs dir.
|
||||
QByteArray tailOfLatestBridgeLog(); ///< Return the last bytes of the last bridge log.
|
||||
QByteArray tailOfLatestBridgeLog(QString const &sessionID); ///< Return the last bytes of the last bridge log.
|
||||
|
||||
|
||||
} // namespace bridgepp
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
#include "SessionID.h"
|
||||
#include "QtCore/qdatetime.h"
|
||||
|
||||
|
||||
namespace {
|
||||
|
||||
|
||||
QString const dateTimeFormat = "yyyyMMdd_hhmmsszzz"; ///< The format string for date/time used by the sessionID.
|
||||
|
||||
|
||||
}
|
||||
|
||||
|
||||
namespace bridgepp {
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \return a new session ID based on the current local date/time
|
||||
//****************************************************************************************************************************************************
|
||||
QString newSessionID() {
|
||||
return QDateTime::currentDateTime().toString(dateTimeFormat);
|
||||
}
|
||||
|
||||
|
||||
//****************************************************************************************************************************************************
|
||||
/// \param[in] sessionID The sessionID.
|
||||
/// \return The date/time corresponding to the sessionID.
|
||||
/// \return An invalid date/time if an error occurs.
|
||||
//****************************************************************************************************************************************************
|
||||
QDateTime sessionIDToDateTime(QString const &sessionID) {
|
||||
return QDateTime::fromString(sessionID, dateTimeFormat);
|
||||
}
|
||||
|
||||
|
||||
} // namespace
|
|
@ -0,0 +1,32 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
#ifndef BRIDGE_PP_SESSION_ID_H
|
||||
#define BRIDGE_PP_SESSION_ID_H
|
||||
|
||||
|
||||
namespace bridgepp {
|
||||
|
||||
|
||||
QString newSessionID(); ///< Create a new sessions
|
||||
QDateTime sessionIDToDateTime(QString const &sessionID); ///< Parse the date/time from a sessionID.
|
||||
|
||||
|
||||
} // namespace
|
||||
|
||||
#endif //BRIDGE_PP_SESSION_ID_H
|
|
@ -0,0 +1,74 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package cli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
|
||||
"github.com/abiosoft/ishell"
|
||||
)
|
||||
|
||||
func (f *frontendCLI) debugMailboxState(c *ishell.Context) {
|
||||
f.ShowPrompt(false)
|
||||
defer f.ShowPrompt(true)
|
||||
|
||||
checkFlags := f.yesNoQuestion("Also check message flags")
|
||||
|
||||
c.Println("Starting state check. Note that depending on your message count this may take a while.")
|
||||
|
||||
result, err := f.bridge.CheckClientState(context.Background(), checkFlags, func(s string) {
|
||||
c.Println(s)
|
||||
})
|
||||
if err != nil {
|
||||
c.Printf("State check failed : %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.Println("State check finished, see log for more details.")
|
||||
|
||||
if len(result.MissingMessages) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
f.Println("\n\nSome missing messages were detected. Bridge can download these messages for you")
|
||||
f.Println("in a directory which you can later send to the developers for analysis.\n")
|
||||
f.Println(bold("Note that the Messages will be stored unencrypted on disk.") + " If you do not wish")
|
||||
f.Println("to continue, input no in the prompt below.\n")
|
||||
|
||||
if !f.yesNoQuestion("Would you like to proceed") {
|
||||
return
|
||||
}
|
||||
|
||||
location, err := os.MkdirTemp("", "debug-state-check-*")
|
||||
if err != nil {
|
||||
f.Printf("Failed to create temporary directory: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
c.Printf("Messages will be downloaded to: %v\n\n", bold(location))
|
||||
|
||||
if err := f.bridge.DebugDownloadFailedMessages(context.Background(), result, location, func(s string, i int, i2 int) {
|
||||
f.Printf("[%v] Retrieving message %v of %v\n", s, i, i2)
|
||||
}); err != nil {
|
||||
f.Println(err)
|
||||
return
|
||||
}
|
||||
|
||||
c.Printf("\nMessage download finished. Data is available at %v\n", bold(location))
|
||||
}
|
|
@ -312,6 +312,19 @@ func New(
|
|||
})
|
||||
fe.AddCmd(telemetryCmd)
|
||||
|
||||
dbgCmd := &ishell.Cmd{
|
||||
Name: "debug",
|
||||
Help: "Debug diagnostics ",
|
||||
}
|
||||
|
||||
dbgCmd.AddCmd(&ishell.Cmd{
|
||||
Name: "mailbox-state",
|
||||
Help: "Verify local mailbox state against proton server state",
|
||||
Func: fe.debugMailboxState,
|
||||
})
|
||||
|
||||
fe.AddCmd(dbgCmd)
|
||||
|
||||
go fe.watchEvents(eventCh)
|
||||
|
||||
go func() {
|
||||
|
|
|
@ -1,69 +0,0 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
func clearLogs(logDir string, maxLogs int, maxCrashes int) error {
|
||||
files, err := os.ReadDir(logDir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to read log directory: %w", err)
|
||||
}
|
||||
|
||||
names := xslices.Map(files, func(file fs.DirEntry) string {
|
||||
return file.Name()
|
||||
})
|
||||
|
||||
// Remove old logs.
|
||||
removeOldLogs(logDir, xslices.Filter(names, func(name string) bool {
|
||||
return MatchLogName(name) && !MatchStackTraceName(name)
|
||||
}), maxLogs)
|
||||
|
||||
// Remove old stack traces.
|
||||
removeOldLogs(logDir, xslices.Filter(names, func(name string) bool {
|
||||
return MatchLogName(name) && MatchStackTraceName(name)
|
||||
}), maxCrashes)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeOldLogs(dir string, names []string, max int) {
|
||||
if count := len(names); count <= max {
|
||||
return
|
||||
}
|
||||
|
||||
// Sort by timestamp, oldest first.
|
||||
slices.SortFunc(names, func(a, b string) bool {
|
||||
return getLogTime(a) < getLogTime(b)
|
||||
})
|
||||
|
||||
for _, path := range xslices.Map(names[:len(names)-max], func(name string) string { return filepath.Join(dir, name) }) {
|
||||
if err := os.Remove(path); err != nil {
|
||||
logrus.WithError(err).WithField("path", path).Warn("Failed to remove old log file")
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,170 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
var (
|
||||
errNoInputFile = errors.New("no file was provided to put in the archive")
|
||||
errCannotFitAnyFile = errors.New("no file can fit in the archive")
|
||||
)
|
||||
|
||||
// zipFilesWithMaxSize compress the maximum number of files from the given list that can fit a ZIP archive file whose size does not exceed
|
||||
// maxSize. Input files are taken in order and the function returns as soon as the next file cannot fit, even if another file further in the list
|
||||
// may fit. The function return the number of files that were included in the archive. The files included are filePath[:fileCount].
|
||||
func zipFilesWithMaxSize(filePaths []string, maxSize int64) (buffer *bytes.Buffer, fileCount int, err error) {
|
||||
if len(filePaths) == 0 {
|
||||
return nil, 0, errNoInputFile
|
||||
}
|
||||
buffer, err = createZipFromFile(filePaths[0])
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if int64(buffer.Len()) > maxSize {
|
||||
return nil, 0, errCannotFitAnyFile
|
||||
}
|
||||
|
||||
fileCount = 1
|
||||
var previousBuffer *bytes.Buffer
|
||||
|
||||
for _, filePath := range filePaths[1:] {
|
||||
previousBuffer = cloneBuffer(buffer)
|
||||
|
||||
zipReader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes())))
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
buffer, err = addFileToArchive(zipReader, filePath)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if int64(buffer.Len()) > maxSize {
|
||||
return previousBuffer, fileCount, nil
|
||||
}
|
||||
|
||||
fileCount++
|
||||
}
|
||||
|
||||
return buffer, fileCount, nil
|
||||
}
|
||||
|
||||
// cloneBuffer clones a buffer.
|
||||
func cloneBuffer(buffer *bytes.Buffer) *bytes.Buffer {
|
||||
return bytes.NewBuffer(bytes.Clone(buffer.Bytes()))
|
||||
}
|
||||
|
||||
// createZip creates a zip archive containing a single file.
|
||||
func createZipFromFile(filePath string) (*bytes.Buffer, error) {
|
||||
file, err := os.Open(filePath) //nolint:gosec
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = file.Close() }()
|
||||
|
||||
return createZip(file, filepath.Base(filePath))
|
||||
}
|
||||
|
||||
// createZip creates a zip file containing a file names filename with content read from reader.
|
||||
func createZip(reader io.Reader, filename string) (*bytes.Buffer, error) {
|
||||
b := bytes.NewBuffer(make([]byte, 0))
|
||||
zipWriter := zip.NewWriter(b)
|
||||
|
||||
f, err := zipWriter.Create(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err = io.Copy(f, reader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = zipWriter.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// addToArchive adds a file to an archive. Because go zip package does not support adding a file to existing (closed) archive file, the way to do it
|
||||
// is to create a new archive copy the raw content of the archive to the new one and add the new file before closing the archive.
|
||||
func addFileToArchive(zipReader *zip.Reader, filePath string) (*bytes.Buffer, error) {
|
||||
file, err := os.Open(filePath) //nolint:gosec
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() { _ = file.Close() }()
|
||||
|
||||
return addToArchive(zipReader, file, filepath.Base(filePath))
|
||||
}
|
||||
|
||||
// addToArchive adds data from a reader to a file in an archive.
|
||||
func addToArchive(zipReader *zip.Reader, reader io.Reader, filename string) (*bytes.Buffer, error) {
|
||||
buffer := bytes.NewBuffer([]byte{})
|
||||
zipWriter := zip.NewWriter(buffer)
|
||||
|
||||
if err := copyZipContent(zipReader, zipWriter); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err := zipWriter.Create(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(f, reader); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := zipWriter.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return buffer, nil
|
||||
}
|
||||
|
||||
// copyZipContent copies the content of a zip to another without recompression.
|
||||
func copyZipContent(zipReader *zip.Reader, zipWriter *zip.Writer) error {
|
||||
for _, zipItem := range zipReader.File {
|
||||
itemReader, err := zipItem.OpenRaw()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
header := zipItem.FileHeader
|
||||
targetItem, err := zipWriter.CreateRaw(&header)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := io.Copy(targetItem, itemReader); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,134 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLogging_LogCompression(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
files := []fileInfo{
|
||||
{filepath.Join(dir, "1.log"), 100000},
|
||||
{filepath.Join(dir, "2.log"), 200000},
|
||||
{filepath.Join(dir, "3.log"), 300000},
|
||||
}
|
||||
|
||||
// Files will have a content and size (relative to the zip format overhead) that ensure a compression ratio of roughly 2:1.
|
||||
createRandomFiles(t, files)
|
||||
paths := xslices.Map(files, func(fileInfo fileInfo) string { return fileInfo.filename })
|
||||
|
||||
// Case 1: no input file.
|
||||
_, _, err := zipFilesWithMaxSize([]string{}, 10)
|
||||
require.ErrorIs(t, err, errNoInputFile)
|
||||
|
||||
// Case 2: limit to low, no file can be included.
|
||||
_, _, err = zipFilesWithMaxSize(paths, 100)
|
||||
require.ErrorIs(t, err, errCannotFitAnyFile)
|
||||
|
||||
// case 3: 1 file fits.
|
||||
buffer, fileCount, err := zipFilesWithMaxSize(paths, 100000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, fileCount)
|
||||
checkZipFileContent(t, buffer, paths[0:1])
|
||||
|
||||
// case 4: 2 files fit.
|
||||
buffer, fileCount, err = zipFilesWithMaxSize(paths, 200000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, fileCount)
|
||||
checkZipFileContent(t, buffer, paths[0:2])
|
||||
|
||||
// case 5: 3 files fit.
|
||||
buffer, fileCount, err = zipFilesWithMaxSize(paths, 500000)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 3, fileCount)
|
||||
checkZipFileContent(t, buffer, paths)
|
||||
}
|
||||
|
||||
func createRandomFiles(t *testing.T, files []fileInfo) {
|
||||
// The file is crafted to have a compression ratio of roughly 2:1 by filling the first half with random data, and the second with zeroes.
|
||||
for _, file := range files {
|
||||
randomData := make([]byte, file.size)
|
||||
_, err := rand.Read(randomData[:file.size/2])
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, os.WriteFile(file.filename, randomData, 0660))
|
||||
}
|
||||
}
|
||||
|
||||
func checkZipFileContent(t *testing.T, buffer *bytes.Buffer, expectedFilePaths []string) {
|
||||
dir := t.TempDir()
|
||||
count := unzipFile(t, buffer, dir)
|
||||
require.Equal(t, len(expectedFilePaths), count)
|
||||
for _, file := range expectedFilePaths {
|
||||
checkFilesAreIdentical(t, file, filepath.Join(dir, filepath.Base(file)))
|
||||
}
|
||||
}
|
||||
|
||||
func unzipFile(t *testing.T, buffer *bytes.Buffer, dir string) int {
|
||||
reader, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(len(buffer.Bytes())))
|
||||
require.NoError(t, err)
|
||||
|
||||
for _, f := range reader.File {
|
||||
info := f.FileInfo()
|
||||
require.False(t, info.IsDir())
|
||||
require.Equal(t, filepath.Base(info.Name()), info.Name()) // no sub-folder
|
||||
extractFileFromZip(t, f, filepath.Join(dir, f.Name))
|
||||
}
|
||||
|
||||
return len(reader.File)
|
||||
}
|
||||
|
||||
func extractFileFromZip(t *testing.T, zip *zip.File, path string) {
|
||||
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, zip.Mode())
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = file.Close() }()
|
||||
|
||||
reader, err := zip.Open()
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = reader.Close() }()
|
||||
|
||||
_, err = io.Copy(file, reader)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func checkFilesAreIdentical(t *testing.T, path1, path2 string) {
|
||||
require.EqualValues(t, sha256Sum(t, path1), sha256Sum(t, path2))
|
||||
}
|
||||
|
||||
func sha256Sum(t *testing.T, path string) []byte {
|
||||
f, err := os.Open(path)
|
||||
require.NoError(t, err)
|
||||
defer func() { _ = f.Close() }()
|
||||
|
||||
hash := sha256.New()
|
||||
_, err = io.Copy(hash, f)
|
||||
require.NoError(t, err)
|
||||
|
||||
return hash.Sum(nil)
|
||||
}
|
|
@ -23,16 +23,15 @@ import (
|
|||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime/pprof"
|
||||
"time"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/crash"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func DumpStackTrace(logsPath string) crash.RecoveryAction {
|
||||
func DumpStackTrace(logsPath string, sessionID SessionID, appName AppName) crash.RecoveryAction {
|
||||
return func(r interface{}) error {
|
||||
file := filepath.Join(logsPath, getStackTraceName(constants.Version, constants.Revision))
|
||||
file := filepath.Join(logsPath, getStackTraceName(sessionID, appName, constants.Version, constants.Tag))
|
||||
|
||||
f, err := os.OpenFile(filepath.Clean(file), os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0o600)
|
||||
if err != nil {
|
||||
|
@ -53,10 +52,10 @@ func DumpStackTrace(logsPath string) crash.RecoveryAction {
|
|||
}
|
||||
}
|
||||
|
||||
func getStackTraceName(version, revision string) string {
|
||||
return fmt.Sprintf("v%v_%v_crash_%v.log", version, revision, time.Now().Unix())
|
||||
func getStackTraceName(sessionID SessionID, appName AppName, version, tag string) string {
|
||||
return fmt.Sprintf("%v_%v_000_v%v_%v_crash.log", sessionID, appName, version, tag)
|
||||
}
|
||||
|
||||
func MatchStackTraceName(name string) bool {
|
||||
return regexp.MustCompile(`^v.*_crash_.*\.log$`).MatchString(name)
|
||||
return regexp.MustCompile(`^\d{8}_\d{9}_.*_000_.*_crash\.log$`).MatchString(name)
|
||||
}
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLogging_MatchStackTraceName(t *testing.T) {
|
||||
filename := getStackTraceName(NewSessionID(), constants.AppName, constants.Version, constants.Tag)
|
||||
require.True(t, len(filename) > 0)
|
||||
require.True(t, MatchStackTraceName(filename))
|
||||
require.False(t, MatchStackTraceName("Invalid.log"))
|
||||
}
|
|
@ -18,32 +18,37 @@
|
|||
package logging
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"errors"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxLogSize defines the maximum log size we should permit: 5 MB
|
||||
// DefaultMaxLogFileSize defines the maximum log size we should permit: 5 MB
|
||||
//
|
||||
// The Zendesk limit for an attachement is 50MB and this is what will
|
||||
// The Zendesk limit for an attachment is 50MB and this is what will
|
||||
// be allowed via the API. However, if that fails for some reason, the
|
||||
// fallback is sending the report via email, which has a limit of 10mb
|
||||
// total or 7MB per file. Since we can produce up to 6 logs, and we
|
||||
// compress all the files (avarage compression - 80%), we need to have
|
||||
// a limit of 30MB total before compression, hence 5MB per log file.
|
||||
MaxLogSize = 5 * 1024 * 1024
|
||||
// total or 7MB per file.
|
||||
DefaultMaxLogFileSize = 5 * 1024 * 1024
|
||||
)
|
||||
|
||||
// MaxLogs defines how many log files should be kept.
|
||||
MaxLogs = 10
|
||||
type AppName string
|
||||
|
||||
const (
|
||||
BridgeShortAppName AppName = "bri"
|
||||
LauncherShortAppName AppName = "lau"
|
||||
GUIShortAppName AppName = "gui"
|
||||
)
|
||||
|
||||
type coloredStdOutHook struct {
|
||||
|
@ -82,7 +87,9 @@ func (cs *coloredStdOutHook) Fire(entry *logrus.Entry) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func Init(logsPath, level string) error {
|
||||
// Init Initialize logging. Log files are rotated when their size exceeds rotationSize. if pruningSize >= 0, pruning occurs using
|
||||
// the default pruning algorithm.
|
||||
func Init(logsPath string, sessionID SessionID, appName AppName, rotationSize, pruningSize int64, level string) (io.Closer, error) {
|
||||
logrus.SetFormatter(&logrus.TextFormatter{
|
||||
DisableColors: true,
|
||||
FullTimestamp: true,
|
||||
|
@ -91,20 +98,80 @@ func Init(logsPath, level string) error {
|
|||
|
||||
logrus.AddHook(newColoredStdOutHook())
|
||||
|
||||
rotator, err := NewRotator(MaxLogSize, func() (io.WriteCloser, error) {
|
||||
if err := clearLogs(logsPath, MaxLogs, MaxLogs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return os.Create(filepath.Join(logsPath, getLogName(constants.Version, constants.Revision))) //nolint:gosec // G304
|
||||
})
|
||||
rotator, err := NewDefaultRotator(logsPath, sessionID, appName, rotationSize, pruningSize)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
logrus.SetOutput(rotator)
|
||||
|
||||
return setLevel(level)
|
||||
return rotator, setLevel(level)
|
||||
}
|
||||
|
||||
// Close closes the log file. if closer is nil, no error is reported.
|
||||
func Close(closer io.Closer) error {
|
||||
if closer == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
logrus.SetOutput(os.Stdout)
|
||||
return closer.Close()
|
||||
}
|
||||
|
||||
// ZipLogsForBugReport returns an archive containing the logs for bug report.
|
||||
func ZipLogsForBugReport(logsPath string, maxSessionCount int, maxZipSize int64) (*bytes.Buffer, error) {
|
||||
paths, err := getOrderedLogFileListForBugReport(logsPath, maxSessionCount)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
buffer, _, err := zipFilesWithMaxSize(paths, maxZipSize)
|
||||
return buffer, err
|
||||
}
|
||||
|
||||
// getOrderedLogFileListForBugReport returns the ordered list of log file paths to include in the user triggered bug reports. Only the last
|
||||
// maxSessionCount sessions are included. Priorities:
|
||||
// - session in chronologically descending order.
|
||||
// - for each session: last 2 bridge logs, first bridge log, gui logs, launcher logs, all other bridge logs.
|
||||
func getOrderedLogFileListForBugReport(logsPath string, maxSessionCount int) ([]string, error) {
|
||||
sessionInfoList, err := buildSessionInfoList(logsPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sortedSessions := maps.Values(sessionInfoList)
|
||||
slices.SortFunc(sortedSessions, func(lhs, rhs *sessionInfo) bool { return lhs.sessionID > rhs.sessionID })
|
||||
count := len(sortedSessions)
|
||||
if count > maxSessionCount {
|
||||
sortedSessions = sortedSessions[:maxSessionCount]
|
||||
}
|
||||
|
||||
filePathFunc := func(logFileInfo logFileInfo) string { return filepath.Join(logsPath, logFileInfo.filename) }
|
||||
|
||||
var result []string
|
||||
for _, session := range sortedSessions {
|
||||
bridgeLogCount := len(session.bridgeLogs)
|
||||
if bridgeLogCount > 0 {
|
||||
result = append(result, filepath.Join(logsPath, session.bridgeLogs[bridgeLogCount-1].filename))
|
||||
}
|
||||
if bridgeLogCount > 1 {
|
||||
result = append(result, filepath.Join(logsPath, session.bridgeLogs[bridgeLogCount-2].filename))
|
||||
}
|
||||
if bridgeLogCount > 2 {
|
||||
result = append(result, filepath.Join(logsPath, session.bridgeLogs[0].filename))
|
||||
}
|
||||
if len(session.guiLogs) > 0 {
|
||||
result = append(result, xslices.Map(session.guiLogs, filePathFunc)...)
|
||||
}
|
||||
if len(session.launcherLogs) > 0 {
|
||||
result = append(result, xslices.Map(session.launcherLogs, filePathFunc)...)
|
||||
}
|
||||
if bridgeLogCount > 3 {
|
||||
result = append(result, xslices.Map(session.bridgeLogs[1:bridgeLogCount-2], filePathFunc)...)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// setLevel will change the level of logging and in case of Debug or Trace
|
||||
|
@ -137,34 +204,51 @@ func setLevel(level string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func getLogName(version, revision string) string {
|
||||
return fmt.Sprintf("v%v_%v_%v.log", version, revision, time.Now().Unix())
|
||||
}
|
||||
func getLogSessionID(filename string) (SessionID, error) {
|
||||
re := regexp.MustCompile(`^(?P<sessionID>\d{8}_\d{9})_.*\.log$`)
|
||||
|
||||
func getLogTime(name string) int {
|
||||
re := regexp.MustCompile(`^v.*_.*_(?P<timestamp>\d+).log$`)
|
||||
|
||||
match := re.FindStringSubmatch(name)
|
||||
match := re.FindStringSubmatch(filename)
|
||||
|
||||
errInvalidFileName := errors.New("log file name is invalid")
|
||||
if len(match) == 0 {
|
||||
logrus.Warn("Could not parse log name: ", name)
|
||||
return 0
|
||||
logrus.WithField("filename", filename).Warn("Could not parse log filename")
|
||||
return "", errInvalidFileName
|
||||
}
|
||||
|
||||
timestamp, err := strconv.Atoi(match[re.SubexpIndex("timestamp")])
|
||||
index := re.SubexpIndex("sessionID")
|
||||
if index < 0 {
|
||||
logrus.WithField("filename", filename).Warn("Could not parse log filename")
|
||||
return "", errInvalidFileName
|
||||
}
|
||||
|
||||
return SessionID(match[index]), nil
|
||||
}
|
||||
|
||||
func getLogTime(filename string) time.Time {
|
||||
sessionID, err := getLogSessionID(filename)
|
||||
if err != nil {
|
||||
return 0
|
||||
return time.Time{}
|
||||
}
|
||||
|
||||
return timestamp
|
||||
return sessionID.toTime()
|
||||
}
|
||||
|
||||
func MatchLogName(name string) bool {
|
||||
return regexp.MustCompile(`^v.*\.log$`).MatchString(name)
|
||||
// MatchBridgeLogName return true iff filename is a bridge log filename.
|
||||
func MatchBridgeLogName(filename string) bool {
|
||||
return matchLogName(filename, BridgeShortAppName)
|
||||
}
|
||||
|
||||
func MatchGUILogName(name string) bool {
|
||||
return regexp.MustCompile(`^gui_v.*\.log$`).MatchString(name)
|
||||
// MatchGUILogName return true iff filename is a bridge-gui log filename.
|
||||
func MatchGUILogName(filename string) bool {
|
||||
return matchLogName(filename, GUIShortAppName)
|
||||
}
|
||||
|
||||
// MatchLauncherLogName return true iff filename is a launcher log filename.
|
||||
func MatchLauncherLogName(filename string) bool {
|
||||
return matchLogName(filename, LauncherShortAppName)
|
||||
}
|
||||
|
||||
func matchLogName(logName string, appName AppName) bool {
|
||||
return regexp.MustCompile(`^\d{8}_\d{9}_\Q` + string(appName) + `\E_\d{3}_.*\.log$`).MatchString(logName)
|
||||
}
|
||||
|
||||
type logKey string
|
||||
|
@ -180,12 +264,3 @@ func WithLogrusField(ctx context.Context, key string, value interface{}) context
|
|||
fields[key] = value
|
||||
return context.WithValue(ctx, logrusFields, fields)
|
||||
}
|
||||
|
||||
func LogFromContext(ctx context.Context) *logrus.Entry {
|
||||
fields, ok := ctx.Value(logrusFields).(logrus.Fields)
|
||||
if !ok || fields == nil {
|
||||
return logrus.WithField("ctx", "empty")
|
||||
}
|
||||
|
||||
return logrus.WithFields(fields)
|
||||
}
|
||||
|
|
|
@ -22,62 +22,84 @@ import (
|
|||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
// TestClearLogs tests that cearLogs removes only bridge old log files keeping last three of them.
|
||||
func TestClearLogs(t *testing.T) {
|
||||
func TestLogging_GetLogTime(t *testing.T) {
|
||||
sessionID := NewSessionID()
|
||||
fp := defaultFileProvider(os.TempDir(), sessionID, "bridge-test")
|
||||
wc, err := fp(0)
|
||||
require.NoError(t, err)
|
||||
file, ok := wc.(*os.File)
|
||||
require.True(t, ok)
|
||||
|
||||
sessionIDTime := sessionID.toTime()
|
||||
require.False(t, sessionIDTime.IsZero())
|
||||
logTime := getLogTime(filepath.Base(file.Name()))
|
||||
require.False(t, logTime.IsZero())
|
||||
require.Equal(t, sessionIDTime, logTime)
|
||||
}
|
||||
|
||||
func TestLogging_MatchLogName(t *testing.T) {
|
||||
bridgeLog := "20230602_094633102_bri_000_v3.0.99+git_5b650b1be3.log"
|
||||
crashLog := "20230602_094633102_bri_000_v3.0.99+git_5b650b1be3_crash.log"
|
||||
guiLog := "20230602_094633102_gui_000_v3.0.99+git_5b650b1be3.log"
|
||||
launcherLog := "20230602_094633102_lau_000_v3.0.99+git_5b650b1be3.log"
|
||||
require.True(t, MatchBridgeLogName(bridgeLog))
|
||||
require.False(t, MatchGUILogName(bridgeLog))
|
||||
require.False(t, MatchLauncherLogName(bridgeLog))
|
||||
require.True(t, MatchBridgeLogName(crashLog))
|
||||
require.False(t, MatchGUILogName(crashLog))
|
||||
require.False(t, MatchLauncherLogName(crashLog))
|
||||
require.False(t, MatchBridgeLogName(guiLog))
|
||||
require.True(t, MatchGUILogName(guiLog))
|
||||
require.False(t, MatchLauncherLogName(guiLog))
|
||||
require.False(t, MatchBridgeLogName(launcherLog))
|
||||
require.False(t, MatchGUILogName(launcherLog))
|
||||
require.True(t, MatchLauncherLogName(launcherLog))
|
||||
}
|
||||
|
||||
func TestLogging_GetOrderedLogFileListForBugReport(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
|
||||
// Create some old log files.
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "other.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.4.7_debe87f2f5_0000000001.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.4.8_debe87f2f5_0000000002.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.4.9_debe87f2f5_0000000003.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.0_debe87f2f5_0000000004.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.1_debe87f2f5_0000000005.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.2_debe87f2f5_0000000006.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.3_debe87f2f5_0000000007.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.4_debe87f2f5_0000000008.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.5_debe87f2f5_0000000009.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.6_debe87f2f5_0000000010.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.7_debe87f2f5_0000000011.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.8_debe87f2f5_0000000012.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.12_debe87f2f5_0000000013.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.9_debe87f2f5_0000000014.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.10_debe87f2f5_0000000015.log"), []byte("Hello"), 0o755))
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "v2.5.11_debe87f2f5_0000000016.log"), []byte("Hello"), 0o755))
|
||||
|
||||
// Clear the logs.
|
||||
require.NoError(t, clearLogs(dir, 3, 0))
|
||||
|
||||
// We should only clear matching files, and keep the 3 most recent ones.
|
||||
checkFileNames(t, dir, []string{
|
||||
"other.log",
|
||||
"v2.5.9_debe87f2f5_0000000014.log",
|
||||
"v2.5.10_debe87f2f5_0000000015.log",
|
||||
"v2.5.11_debe87f2f5_0000000016.log",
|
||||
})
|
||||
}
|
||||
|
||||
func checkFileNames(t *testing.T, dir string, expectedFileNames []string) {
|
||||
require.ElementsMatch(t, expectedFileNames, getFileNames(t, dir))
|
||||
}
|
||||
|
||||
func getFileNames(t *testing.T, dir string) []string {
|
||||
files, err := os.ReadDir(dir)
|
||||
filePaths, err := getOrderedLogFileListForBugReport(dir, 3)
|
||||
require.NoError(t, err)
|
||||
require.True(t, len(filePaths) == 0)
|
||||
|
||||
fileNames := []string{}
|
||||
for _, file := range files {
|
||||
fileNames = append(fileNames, file.Name())
|
||||
if file.IsDir() {
|
||||
subDir := filepath.Join(dir, file.Name())
|
||||
subFileNames := getFileNames(t, subDir)
|
||||
for _, subFileName := range subFileNames {
|
||||
fileNames = append(fileNames, file.Name()+"/"+subFileName)
|
||||
}
|
||||
}
|
||||
}
|
||||
return fileNames
|
||||
require.NoError(t, os.WriteFile(filepath.Join(dir, "invalid.log"), []byte("proton"), 0660))
|
||||
|
||||
_ = createDummySession(t, dir, 1000, 250, 500, 3000)
|
||||
sessionID1 := createDummySession(t, dir, 1000, 250, 500, 500)
|
||||
sessionID2 := createDummySession(t, dir, 1000, 250, 500, 500)
|
||||
sessionID3 := createDummySession(t, dir, 1000, 250, 500, 4500)
|
||||
|
||||
filePaths, err = getOrderedLogFileListForBugReport(dir, 3)
|
||||
fileSuffix := "_v" + constants.Version + "_" + constants.Tag + ".log"
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, []string{
|
||||
filepath.Join(dir, string(sessionID3)+"_bri_004"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID3)+"_bri_003"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID3)+"_bri_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID3)+"_gui_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID3)+"_lau_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID3)+"_bri_001"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID3)+"_bri_002"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID2)+"_bri_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID2)+"_gui_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID2)+"_lau_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID1)+"_bri_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID1)+"_gui_000"+fileSuffix),
|
||||
filepath.Join(dir, string(sessionID1)+"_lau_000"+fileSuffix),
|
||||
}, filePaths)
|
||||
}
|
||||
|
||||
func TestLogging_Close(t *testing.T) {
|
||||
d := t.TempDir()
|
||||
closer, err := Init(d, NewSessionID(), constants.AppName, 1, DefaultPruningSize, "debug")
|
||||
require.NoError(t, err)
|
||||
logrus.Debug("Test") // because we set max log file size to 1, this will force a rotation of the log file.
|
||||
require.NotNil(t, closer)
|
||||
require.NoError(t, closer.Close())
|
||||
}
|
||||
|
|
|
@ -0,0 +1,230 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultPruningSize = 1024 * 1024 * 200
|
||||
NoPruning = -1
|
||||
)
|
||||
|
||||
type Pruner func() (failureCount int, err error)
|
||||
|
||||
type logFileInfo struct {
|
||||
filename string
|
||||
size int64
|
||||
}
|
||||
|
||||
type sessionInfo struct {
|
||||
dir string
|
||||
sessionID SessionID
|
||||
launcherLogs []logFileInfo
|
||||
guiLogs []logFileInfo
|
||||
bridgeLogs []logFileInfo
|
||||
}
|
||||
|
||||
func defaultPruner(logsDir string, currentSessionID SessionID, pruningSize int64) func() (failureCount int, err error) {
|
||||
return func() (int, error) {
|
||||
return pruneLogs(logsDir, currentSessionID, pruningSize)
|
||||
}
|
||||
}
|
||||
|
||||
func nullPruner() (failureCount int, err error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// DefaultPruner gets rid of the older log files according to the following policy:
|
||||
// - We will limit the total size of the log files to roughly pruningSize.
|
||||
// The current session is included in this quota, in order not to grow indefinitely on setups where bridge can run uninterrupted for months.
|
||||
// - If the current session's log files total size is above the pruning size, we delete all other sessions log. For the current we keep
|
||||
// launcher and gui log (they're limited to a few kb at most by nature), and we have n bridge log files,
|
||||
// We keep the first and list log file (startup information contains relevant information, notably the SentryID), and start deleting the other
|
||||
// starting with the oldest until the total size drops below the pruning size.
|
||||
// - Otherwise: If the total size of log files for all sessions exceeds pruningSize, sessions gets deleted starting with the oldest, until the size
|
||||
// drops below the pruning size. Sessions are treated atomically. Current session is left untouched in that case.
|
||||
func pruneLogs(logDir string, currentSessionID SessionID, pruningSize int64) (failureCount int, err error) {
|
||||
sessionInfoList, err := buildSessionInfoList(logDir)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// we want total size to include the current session.
|
||||
totalSize := xslices.Reduce(maps.Values(sessionInfoList), int64(0), func(sum int64, info *sessionInfo) int64 { return sum + info.size() })
|
||||
if totalSize <= pruningSize {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
currentSessionInfo, ok := sessionInfoList[currentSessionID]
|
||||
if ok {
|
||||
delete(sessionInfoList, currentSessionID)
|
||||
|
||||
if currentSessionInfo.size() > pruningSize {
|
||||
// current session is already too big. We delete all other sessions and prune the current session.
|
||||
for _, session := range sessionInfoList {
|
||||
failureCount += session.deleteFiles()
|
||||
}
|
||||
|
||||
failureCount += currentSessionInfo.pruneAsCurrentSession(pruningSize)
|
||||
return failureCount, nil
|
||||
}
|
||||
}
|
||||
|
||||
// current session size if below max size, so we erase older session starting with the eldest until we go below maxFileSize
|
||||
sortedSessions := maps.Values(sessionInfoList)
|
||||
slices.SortFunc(sortedSessions, func(lhs, rhs *sessionInfo) bool { return lhs.sessionID < rhs.sessionID })
|
||||
for _, sessionInfo := range sortedSessions {
|
||||
totalSize -= sessionInfo.size()
|
||||
failureCount += sessionInfo.deleteFiles()
|
||||
if totalSize <= pruningSize {
|
||||
return failureCount, nil
|
||||
}
|
||||
}
|
||||
|
||||
return failureCount, nil
|
||||
}
|
||||
|
||||
func newSessionInfo(dir string, sessionID SessionID) (*sessionInfo, error) {
|
||||
paths, err := filepath.Glob(filepath.Join(dir, string(sessionID)+"_*.log"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rx := regexp.MustCompile(`^\Q` + string(sessionID) + `\E_([^_]*)_\d+_.*\.log$`)
|
||||
|
||||
result := sessionInfo{sessionID: sessionID, dir: dir}
|
||||
for _, path := range paths {
|
||||
filename := filepath.Base(path)
|
||||
match := rx.FindStringSubmatch(filename)
|
||||
if len(match) != 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
stats, err := os.Stat(path)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
fileInfo := logFileInfo{
|
||||
filename: filename,
|
||||
size: stats.Size(),
|
||||
}
|
||||
|
||||
switch AppName(match[1]) {
|
||||
case LauncherShortAppName:
|
||||
result.launcherLogs = append(result.launcherLogs, fileInfo)
|
||||
case GUIShortAppName:
|
||||
result.guiLogs = append(result.guiLogs, fileInfo)
|
||||
case BridgeShortAppName:
|
||||
result.bridgeLogs = append(result.bridgeLogs, fileInfo)
|
||||
}
|
||||
}
|
||||
|
||||
lessFunc := func(lhs, rhs logFileInfo) bool { return strings.Compare(lhs.filename, rhs.filename) < 0 }
|
||||
slices.SortFunc(result.launcherLogs, lessFunc)
|
||||
slices.SortFunc(result.guiLogs, lessFunc)
|
||||
slices.SortFunc(result.bridgeLogs, lessFunc)
|
||||
|
||||
return &result, nil
|
||||
}
|
||||
|
||||
func (s *sessionInfo) size() int64 {
|
||||
summer := func(accum int64, logInfo logFileInfo) int64 { return accum + logInfo.size }
|
||||
size := xslices.Reduce(s.launcherLogs, 0, summer)
|
||||
size = xslices.Reduce(s.guiLogs, size, summer)
|
||||
size = xslices.Reduce(s.bridgeLogs, size, summer)
|
||||
return size
|
||||
}
|
||||
|
||||
func (s *sessionInfo) deleteFiles() (failureCount int) {
|
||||
var allLogs []logFileInfo
|
||||
allLogs = append(allLogs, s.launcherLogs...)
|
||||
allLogs = append(allLogs, s.guiLogs...)
|
||||
allLogs = append(allLogs, s.bridgeLogs...)
|
||||
|
||||
for _, log := range allLogs {
|
||||
if err := os.Remove(filepath.Join(s.dir, log.filename)); err != nil {
|
||||
failureCount++
|
||||
}
|
||||
}
|
||||
|
||||
return failureCount
|
||||
}
|
||||
|
||||
func (s *sessionInfo) pruneAsCurrentSession(pruningSize int64) (failureCount int) {
|
||||
// when pruning the current session, we keep the launcher and GUI logs, the first and last bridge log file
|
||||
// and we delete intermediate bridge logs until the size constraint is satisfied (or there nothing left to delete).
|
||||
if len(s.bridgeLogs) < 3 {
|
||||
return 0
|
||||
}
|
||||
|
||||
size := s.size()
|
||||
if size <= pruningSize {
|
||||
return 0
|
||||
}
|
||||
|
||||
for _, fileInfo := range s.bridgeLogs[1 : len(s.bridgeLogs)-1] {
|
||||
if err := os.Remove(filepath.Join(s.dir, fileInfo.filename)); err != nil {
|
||||
failureCount++
|
||||
}
|
||||
size -= fileInfo.size
|
||||
if size <= pruningSize {
|
||||
return failureCount
|
||||
}
|
||||
}
|
||||
|
||||
return failureCount
|
||||
}
|
||||
|
||||
func buildSessionInfoList(dir string) (map[SessionID]*sessionInfo, error) {
|
||||
result := make(map[SessionID]*sessionInfo)
|
||||
entries, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, entry := range entries {
|
||||
if entry.IsDir() {
|
||||
continue
|
||||
}
|
||||
rx := regexp.MustCompile(`^(\d{8}_\d{9})_.*\.log$`)
|
||||
match := rx.FindStringSubmatch(entry.Name())
|
||||
if match == nil || len(match) < 2 {
|
||||
continue
|
||||
}
|
||||
|
||||
sessionID := SessionID(match[1])
|
||||
if _, ok := result[sessionID]; !ok {
|
||||
sessionInfo, err := newSessionInfo(dir, sessionID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result[sessionID] = sessionInfo
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
|
@ -0,0 +1,236 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
type fileInfo struct {
|
||||
filename string
|
||||
size int64
|
||||
}
|
||||
|
||||
var logFileSuffix = "_v" + constants.Version + "_" + constants.Tag + ".log"
|
||||
|
||||
func TestLogging_Pruning(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
const maxLogSize = 100
|
||||
sessionID1 := createDummySession(t, dir, maxLogSize, 50, 50, 250)
|
||||
sessionID2 := createDummySession(t, dir, maxLogSize, 100, 100, 350)
|
||||
sessionID3 := createDummySession(t, dir, maxLogSize, 150, 100, 350)
|
||||
|
||||
// Expected files per session
|
||||
session1Files := []fileInfo{
|
||||
{filename: string(sessionID1) + "_lau_000" + logFileSuffix, size: 50},
|
||||
{filename: string(sessionID1) + "_gui_000" + logFileSuffix, size: 50},
|
||||
{filename: string(sessionID1) + "_bri_000" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID1) + "_bri_001" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID1) + "_bri_002" + logFileSuffix, size: 50},
|
||||
}
|
||||
|
||||
session2Files := []fileInfo{
|
||||
{filename: string(sessionID2) + "_lau_000" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID2) + "_gui_000" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID2) + "_bri_000" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID2) + "_bri_001" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID2) + "_bri_002" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID2) + "_bri_003" + logFileSuffix, size: 50},
|
||||
}
|
||||
|
||||
session3Files := []fileInfo{
|
||||
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID3) + "_lau_001" + logFileSuffix, size: 50},
|
||||
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID3) + "_bri_001" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID3) + "_bri_002" + logFileSuffix, size: 100},
|
||||
{filename: string(sessionID3) + "_bri_003" + logFileSuffix, size: 50},
|
||||
}
|
||||
|
||||
allSessions := session1Files
|
||||
allSessions = append(allSessions, append(session2Files, session3Files...)...)
|
||||
checkFolderContent(t, dir, allSessions...)
|
||||
|
||||
failureCount, err := pruneLogs(dir, sessionID3, 2000) // nothing to prune
|
||||
require.Equal(t, failureCount, 0)
|
||||
require.NoError(t, err)
|
||||
checkFolderContent(t, dir, allSessions...)
|
||||
|
||||
failureCount, err = pruneLogs(dir, sessionID3, 1200) // session 1 is pruned
|
||||
require.Equal(t, failureCount, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
checkFolderContent(t, dir, append(session2Files, session3Files...)...)
|
||||
failureCount, err = pruneLogs(dir, sessionID3, 1000) // session 2 is pruned
|
||||
require.Equal(t, failureCount, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
checkFolderContent(t, dir, session3Files...)
|
||||
}
|
||||
|
||||
func TestLogging_PruningBigCurrentSession(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
const maxLogFileSize = 1000
|
||||
sessionID1 := createDummySession(t, dir, maxLogFileSize, 500, 500, 2500)
|
||||
sessionID2 := createDummySession(t, dir, maxLogFileSize, 1000, 1000, 3500)
|
||||
sessionID3 := createDummySession(t, dir, maxLogFileSize, 500, 500, 10500)
|
||||
|
||||
// Expected files per session
|
||||
session1Files := []fileInfo{
|
||||
{filename: string(sessionID1) + "_lau_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID1) + "_gui_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID1) + "_bri_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID1) + "_bri_001" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID1) + "_bri_002" + logFileSuffix, size: 500},
|
||||
}
|
||||
|
||||
session2Files := []fileInfo{
|
||||
{filename: string(sessionID2) + "_lau_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID2) + "_gui_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID2) + "_bri_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID2) + "_bri_001" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID2) + "_bri_002" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID2) + "_bri_003" + logFileSuffix, size: 500},
|
||||
}
|
||||
|
||||
session3Files := []fileInfo{
|
||||
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_001" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_002" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_003" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_004" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_005" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_006" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_007" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_008" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_009" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
|
||||
}
|
||||
|
||||
allSessions := session1Files
|
||||
allSessions = append(allSessions, append(session2Files, session3Files...)...)
|
||||
checkFolderContent(t, dir, allSessions...)
|
||||
|
||||
// current session is bigger than maxFileSize. We keep launcher and gui logs, the first and last bridge log
|
||||
// and only the last bridge log that keep the total file size under the limit.
|
||||
failureCount, err := pruneLogs(dir, sessionID3, 8000)
|
||||
require.Equal(t, failureCount, 0)
|
||||
require.NoError(t, err)
|
||||
checkFolderContent(t, dir, []fileInfo{
|
||||
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_005" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_006" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_007" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_008" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_009" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
|
||||
}...)
|
||||
|
||||
failureCount, err = pruneLogs(dir, sessionID3, 5000)
|
||||
require.Equal(t, failureCount, 0)
|
||||
require.NoError(t, err)
|
||||
checkFolderContent(t, dir, []fileInfo{
|
||||
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_008" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_009" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
|
||||
}...)
|
||||
|
||||
// whatever maxFileSize is, we will always keep the following files
|
||||
minimalFiles := []fileInfo{
|
||||
{filename: string(sessionID3) + "_lau_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_gui_000" + logFileSuffix, size: 500},
|
||||
{filename: string(sessionID3) + "_bri_000" + logFileSuffix, size: 1000},
|
||||
{filename: string(sessionID3) + "_bri_010" + logFileSuffix, size: 500},
|
||||
}
|
||||
failureCount, err = pruneLogs(dir, sessionID3, 2000)
|
||||
require.Equal(t, failureCount, 0)
|
||||
require.NoError(t, err)
|
||||
checkFolderContent(t, dir, minimalFiles...)
|
||||
|
||||
failureCount, err = pruneLogs(dir, sessionID3, 0)
|
||||
require.Equal(t, failureCount, 0)
|
||||
require.NoError(t, err)
|
||||
checkFolderContent(t, dir, minimalFiles...)
|
||||
}
|
||||
|
||||
func createDummySession(t *testing.T, dir string, maxLogFileSize int64, launcherLogSize, guiLogSize, bridgeLogSize int64) SessionID {
|
||||
time.Sleep(2 * time.Millisecond) // ensure our sessionID is unused.
|
||||
sessionID := NewSessionID()
|
||||
if launcherLogSize > 0 {
|
||||
createDummyRotatedLogFile(t, dir, sessionID, LauncherShortAppName, launcherLogSize, maxLogFileSize)
|
||||
}
|
||||
|
||||
if guiLogSize > 0 {
|
||||
createDummyRotatedLogFile(t, dir, sessionID, GUIShortAppName, guiLogSize, maxLogFileSize)
|
||||
}
|
||||
|
||||
if bridgeLogSize > 0 {
|
||||
createDummyRotatedLogFile(t, dir, sessionID, BridgeShortAppName, bridgeLogSize, maxLogFileSize)
|
||||
}
|
||||
|
||||
return sessionID
|
||||
}
|
||||
|
||||
func createDummyRotatedLogFile(t *testing.T, dir string, sessionID SessionID, appName AppName, totalSize, maxLogFileSize int64) {
|
||||
rotator, err := NewDefaultRotator(dir, sessionID, appName, maxLogFileSize, NoPruning)
|
||||
require.NoError(t, err)
|
||||
for i := int64(0); i < totalSize/maxLogFileSize; i++ {
|
||||
count, err := rotator.Write(make([]byte, maxLogFileSize))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(count), maxLogFileSize)
|
||||
}
|
||||
|
||||
remainder := totalSize % maxLogFileSize
|
||||
if remainder > 0 {
|
||||
count, err := rotator.Write(make([]byte, remainder))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, int64(count), remainder)
|
||||
}
|
||||
|
||||
require.NoError(t, rotator.wc.Close())
|
||||
}
|
||||
|
||||
func checkFolderContent(t *testing.T, dir string, fileInfos ...fileInfo) {
|
||||
for _, fi := range fileInfos {
|
||||
checkFileExistsWithSize(t, dir, fi)
|
||||
}
|
||||
|
||||
entries, err := os.ReadDir(dir)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, len(fileInfos), len(entries))
|
||||
}
|
||||
|
||||
func checkFileExistsWithSize(t *testing.T, dir string, info fileInfo) {
|
||||
stat, err := os.Stat(filepath.Join(dir, info.filename))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, stat.Size(), info.size)
|
||||
}
|
|
@ -17,21 +17,39 @@
|
|||
|
||||
package logging
|
||||
|
||||
import "io"
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
)
|
||||
|
||||
type Rotator struct {
|
||||
getFile FileProvider
|
||||
wc io.WriteCloser
|
||||
size int
|
||||
maxSize int
|
||||
getFile FileProvider
|
||||
prune Pruner
|
||||
wc io.WriteCloser
|
||||
size int64
|
||||
maxFileSize int64
|
||||
nextIndex int
|
||||
}
|
||||
|
||||
type FileProvider func() (io.WriteCloser, error)
|
||||
type FileProvider func(index int) (io.WriteCloser, error)
|
||||
|
||||
func NewRotator(maxSize int, getFile FileProvider) (*Rotator, error) {
|
||||
func defaultFileProvider(logsPath string, sessionID SessionID, appName AppName) FileProvider {
|
||||
return func(index int) (io.WriteCloser, error) {
|
||||
return os.Create(filepath.Join(logsPath, //nolint:gosec // G304
|
||||
fmt.Sprintf("%v_%v_%03d_v%v_%v.log", sessionID, appName, index, constants.Version, constants.Tag),
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
func NewRotator(maxFileSize int64, getFile FileProvider, prune Pruner) (*Rotator, error) {
|
||||
r := &Rotator{
|
||||
getFile: getFile,
|
||||
maxSize: maxSize,
|
||||
getFile: getFile,
|
||||
prune: prune,
|
||||
maxFileSize: maxFileSize,
|
||||
}
|
||||
|
||||
if err := r.rotate(); err != nil {
|
||||
|
@ -41,8 +59,19 @@ func NewRotator(maxSize int, getFile FileProvider) (*Rotator, error) {
|
|||
return r, nil
|
||||
}
|
||||
|
||||
func NewDefaultRotator(logsPath string, sessionID SessionID, appName AppName, maxLogFileSize, pruningSize int64) (*Rotator, error) {
|
||||
var pruner Pruner
|
||||
if pruningSize < 0 {
|
||||
pruner = nullPruner
|
||||
} else {
|
||||
pruner = defaultPruner(logsPath, sessionID, pruningSize)
|
||||
}
|
||||
|
||||
return NewRotator(maxLogFileSize, defaultFileProvider(logsPath, sessionID, appName), pruner)
|
||||
}
|
||||
|
||||
func (r *Rotator) Write(p []byte) (int, error) {
|
||||
if r.size+len(p) > r.maxSize {
|
||||
if r.size+int64(len(p)) > r.maxFileSize {
|
||||
if err := r.rotate(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
@ -53,21 +82,33 @@ func (r *Rotator) Write(p []byte) (int, error) {
|
|||
return n, err
|
||||
}
|
||||
|
||||
r.size += n
|
||||
|
||||
r.size += int64(n)
|
||||
return n, nil
|
||||
}
|
||||
|
||||
func (r *Rotator) Close() error {
|
||||
if r.wc != nil {
|
||||
return r.wc.Close()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Rotator) rotate() error {
|
||||
if r.wc != nil {
|
||||
_ = r.wc.Close()
|
||||
}
|
||||
|
||||
wc, err := r.getFile()
|
||||
if _, err := r.prune(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
wc, err := r.getFile(r.nextIndex)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.nextIndex++
|
||||
r.wc = wc
|
||||
r.size = 0
|
||||
|
||||
|
|
|
@ -19,8 +19,10 @@ package logging
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
@ -35,15 +37,15 @@ func (c *WriteCloser) Close() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func TestRotator(t *testing.T) {
|
||||
func TestLogging_Rotator(t *testing.T) {
|
||||
n := 0
|
||||
|
||||
getFile := func() (io.WriteCloser, error) {
|
||||
getFile := func(_ int) (io.WriteCloser, error) {
|
||||
n++
|
||||
return &WriteCloser{}, nil
|
||||
}
|
||||
|
||||
r, err := NewRotator(10, getFile)
|
||||
r, err := NewRotator(10, getFile, nullPruner)
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = r.Write([]byte("12345"))
|
||||
|
@ -75,12 +77,89 @@ func TestRotator(t *testing.T) {
|
|||
assert.Equal(t, 4, n)
|
||||
}
|
||||
|
||||
func BenchmarkRotate(b *testing.B) {
|
||||
benchRotate(b, MaxLogSize, getTestFile(b, b.TempDir(), MaxLogSize-1))
|
||||
func TestLogging_DefaultRotator(t *testing.T) {
|
||||
fiveBytes := []byte("00000")
|
||||
tmpDir := os.TempDir()
|
||||
|
||||
sessionID := NewSessionID()
|
||||
basePath := filepath.Join(tmpDir, string(sessionID))
|
||||
|
||||
r, err := NewDefaultRotator(tmpDir, sessionID, "bri", 10, NoPruning)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, countFilesMatching(basePath+"_bri_000_*.log"))
|
||||
require.Equal(t, 1, countFilesMatching(basePath+"*.log"))
|
||||
|
||||
_, err = r.Write(fiveBytes)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, countFilesMatching(basePath+"*.log"))
|
||||
|
||||
_, err = r.Write(fiveBytes)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, countFilesMatching(basePath+"*.log"))
|
||||
|
||||
_, err = r.Write(fiveBytes)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 2, countFilesMatching(basePath+"*.log"))
|
||||
require.Equal(t, 1, countFilesMatching(basePath+"_bri_001_*.log"))
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
_, err = r.Write(fiveBytes)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
require.NoError(t, r.wc.Close())
|
||||
|
||||
// total written: 35 bytes, i.e. 4 log files
|
||||
logFileCount := countFilesMatching(basePath + "*.log")
|
||||
require.Equal(t, 4, logFileCount)
|
||||
for i := 0; i < logFileCount; i++ {
|
||||
require.Equal(t, 1, countFilesMatching(basePath+fmt.Sprintf("_bri_%03d_*.log", i)))
|
||||
}
|
||||
|
||||
cleanupLogs(t, sessionID)
|
||||
}
|
||||
|
||||
func benchRotate(b *testing.B, logSize int, getFile func() (io.WriteCloser, error)) {
|
||||
r, err := NewRotator(logSize, getFile)
|
||||
// TestLogging_DefaultRotatorWithPruning checks the interaction of rotation
// (10-byte files) and pruning (40-byte quota): once the quota is reached,
// every rotation first prunes the oldest files, so the file count stays
// constant while indices keep growing.
func TestLogging_DefaultRotatorWithPruning(t *testing.T) {
	tenBytes := []byte("0000000000")
	tmpDir := t.TempDir()

	sessionID := NewSessionID()
	basePath := filepath.Join(tmpDir, string(sessionID))

	// fill the log dir while below the pruning quota
	r, err := NewDefaultRotator(tmpDir, sessionID, "bri", 10, 40)
	require.NoError(t, err)
	for i := 0; i < 4; i++ {
		// Each 10-byte write fills one file exactly.
		_, err = r.Write(tenBytes)
		require.NoError(t, err)
	}

	// from now on at every rotation, (i.e. every write in this case), we will prune, then create a new file.
	// we should always have 4 files, remaining after prune, plus the newly rotated file with the last written bytes.
	for i := 0; i < 10; i++ {
		_, err := r.Write(tenBytes)
		require.NoError(t, err)
		require.Equal(t, 5, countFilesMatching(basePath+"_bri_*.log"))
	}

	require.NoError(t, r.wc.Close())

	// Final check. 000, 010, 011, 012 are what's left after the last pruning, 013 never got to pass through pruning.
	checkFolderContent(t, tmpDir, []fileInfo{
		{filename: string(sessionID) + "_bri_000" + logFileSuffix, size: 10},
		{filename: string(sessionID) + "_bri_010" + logFileSuffix, size: 10},
		{filename: string(sessionID) + "_bri_011" + logFileSuffix, size: 10},
		{filename: string(sessionID) + "_bri_012" + logFileSuffix, size: 10},
		{filename: string(sessionID) + "_bri_013" + logFileSuffix, size: 10},
	}...)
}
|
||||
|
||||
// BenchmarkRotate measures a full write+rotate cycle using real on-disk log
// files of the default maximum size (each write is one byte short of the
// limit, so every iteration forces a rotation).
func BenchmarkRotate(b *testing.B) {
	benchRotate(b, DefaultMaxLogFileSize, getTestFile(b, b.TempDir(), DefaultMaxLogFileSize-1))
}
|
||||
|
||||
func benchRotate(b *testing.B, logSize int64, getFile func(index int) (io.WriteCloser, error)) {
|
||||
r, err := NewRotator(logSize, getFile, nullPruner)
|
||||
require.NoError(b, err)
|
||||
|
||||
for n := 0; n < b.N; n++ {
|
||||
|
@ -92,8 +171,8 @@ func benchRotate(b *testing.B, logSize int, getFile func() (io.WriteCloser, erro
|
|||
}
|
||||
}
|
||||
|
||||
func getTestFile(b *testing.B, dir string, length int) func() (io.WriteCloser, error) {
|
||||
return func() (io.WriteCloser, error) {
|
||||
func getTestFile(b *testing.B, dir string, length int) func(int) (io.WriteCloser, error) {
|
||||
return func(index int) (io.WriteCloser, error) {
|
||||
b.StopTimer()
|
||||
defer b.StartTimer()
|
||||
|
||||
|
@ -113,3 +192,20 @@ func getTestFile(b *testing.B, dir string, length int) func() (io.WriteCloser, e
|
|||
return f, nil
|
||||
}
|
||||
}
|
||||
|
||||
// countFilesMatching returns the number of filesystem entries matching the
// given glob pattern, or -1 if the pattern itself is malformed.
func countFilesMatching(pattern string) int {
	matches, err := filepath.Glob(pattern)
	if err != nil {
		// filepath.Glob only fails on a bad pattern; signal it with -1.
		return -1
	}

	return len(matches)
}
|
||||
|
||||
func cleanupLogs(t *testing.T, sessionID SessionID) {
|
||||
paths, err := filepath.Glob(filepath.Join(os.TempDir(), string(sessionID)+"*.log"))
|
||||
require.NoError(t, err)
|
||||
for _, path := range paths {
|
||||
require.NoError(t, os.Remove(path))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,64 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
// SessionID identifies a single run; its value encodes the creation time down
// to the millisecond (see NewSessionID).
type SessionID string

const (
	timeFormat = "20060102_150405" // time format in Go does not support milliseconds without dot, so we'll process it manually.
)

// NewSessionID creates a sessionID based on the current time.
func NewSessionID() SessionID {
	now := time.Now()

	// Append the millisecond count manually, zero-padded to three digits.
	return SessionID(fmt.Sprintf("%s%03d", now.Format(timeFormat), now.Nanosecond()/1000000))
}

// NewSessionIDFromString Return a new sessionID from string. If the str is empty a new time based sessionID is returned, otherwise the string
// is used as the sessionID.
func NewSessionIDFromString(str string) SessionID {
	if str == "" {
		return NewSessionID()
	}

	return SessionID(str)
}

// toTime converts a sessionID to a date/Time, considering the time zone is local.
func (s SessionID) toTime() time.Time {
	if len(s) < 3 {
		return time.Time{}
	}

	// The last three characters hold the milliseconds; the rest is the stamp.
	stamp, millis := string(s[:len(s)-3]), string(s[len(s)-3:])

	parsed, err := time.ParseInLocation(timeFormat, stamp, time.Local)
	if err != nil {
		return time.Time{}
	}

	ms, err := strconv.Atoi(millis)
	if err != nil {
		return time.Time{}
	}

	return parsed.Add(time.Duration(ms) * time.Millisecond)
}
|
|
@ -0,0 +1,38 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
|
||||
package logging
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
func TestLogging_SessionID(t *testing.T) {
|
||||
now := time.Now()
|
||||
sessionID := NewSessionID()
|
||||
sessionTime := sessionID.toTime()
|
||||
require.False(t, sessionTime.IsZero())
|
||||
require.WithinRange(t, sessionTime, now.Add(-1*time.Millisecond), now.Add(1*time.Millisecond))
|
||||
|
||||
fromString := NewSessionIDFromString("")
|
||||
require.True(t, len(fromString) > 0)
|
||||
fromString = NewSessionIDFromString(string(sessionID))
|
||||
require.True(t, len(fromString) > 0)
|
||||
require.Equal(t, sessionID, fromString)
|
||||
}
|
|
@ -0,0 +1,419 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package user
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"mime"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ProtonMail/gluon/imap"
|
||||
"github.com/ProtonMail/gluon/rfc822"
|
||||
"github.com/ProtonMail/go-proton-api"
|
||||
"github.com/ProtonMail/gopenpgp/v2/constants"
|
||||
"github.com/ProtonMail/gopenpgp/v2/crypto"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/safe"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/vault"
|
||||
"github.com/bradenaw/juniper/xmaps"
|
||||
"github.com/bradenaw/juniper/xslices"
|
||||
"github.com/emersion/go-message"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// DiagnosticMetadata aggregates message metadata collected from the API for a
// user, used to build diagnostic reports (see GetDiagnosticMetadata and
// BuildMailboxToMessageMap).
type DiagnosticMetadata struct {
	// MessageIDs lists every message ID known to the API for the user.
	MessageIDs []string
	// Metadata holds the API metadata fetched for those messages.
	Metadata []proton.MessageMetadata
	// FailedMessageIDs is the set of message IDs that previously failed to sync.
	FailedMessageIDs xmaps.Set[string]
}

// AccountMailboxMap maps a mailbox name to the diagnostic messages it contains.
type AccountMailboxMap map[string][]DiagMailboxMessage

// DiagMailboxMessage identifies one message within a mailbox for diagnostics.
type DiagMailboxMessage struct {
	// AddressID is the API address the message belongs to.
	AddressID string
	// UserID is the owning bridge user.
	UserID string
	// ID is the API message ID.
	ID string
	// Flags are the IMAP flags derived from the message metadata.
	Flags imap.FlagSet
}
|
||||
|
||||
// BuildMailboxToMessageMap groups the collected message metadata by account
// email and mailbox name. In combined address mode every message is attributed
// to the primary address; messages on disabled addresses, or carrying labels
// that are unknown or filtered out by wantLabel, are skipped. The user's
// address and label maps are read under their respective read locks.
func (apm DiagnosticMetadata) BuildMailboxToMessageMap(user *User) (map[string]AccountMailboxMap, error) {
	return safe.RLockRetErr(func() (map[string]AccountMailboxMap, error) {
		result := make(map[string]AccountMailboxMap)

		mode := user.GetAddressMode()
		primaryAddrID, err := getPrimaryAddr(user.apiAddrs)
		if err != nil {
			return nil, fmt.Errorf("failed to get primary addr for user: %w", err)
		}

		// getAccount resolves the per-email mailbox map for an address,
		// creating it on first use; it reports false for disabled addresses.
		getAccount := func(addrID string) (AccountMailboxMap, bool) {
			if mode == vault.CombinedMode {
				// In combined mode all messages appear under the primary address.
				addrID = primaryAddrID.ID
			}

			addr := user.apiAddrs[addrID]
			if addr.Status != proton.AddressStatusEnabled {
				return nil, false
			}

			v, ok := result[addr.Email]
			if !ok {
				result[addr.Email] = make(AccountMailboxMap)
				v = result[addr.Email]
			}

			return v, true
		}

		for _, metadata := range apm.Metadata {
			// A message appears once per label (mailbox) it carries.
			for _, label := range metadata.LabelIDs {
				details, ok := user.apiLabels[label]
				if !ok {
					logrus.Warnf("User %v has message with unknown label '%v'", user.Name(), label)
					continue
				}

				if !wantLabel(details) {
					continue
				}

				account, enabled := getAccount(metadata.AddressID)
				if !enabled {
					continue
				}

				// System labels use their bare name; user labels use their
				// slash-joined folder path.
				var mboxName string
				if details.Type == proton.LabelTypeSystem {
					mboxName = details.Name
				} else {
					mboxName = strings.Join(getMailboxName(details), "/")
				}

				mboxMessage := DiagMailboxMessage{
					UserID:    user.ID(),
					ID:        metadata.ID,
					AddressID: metadata.AddressID,
					Flags:     buildFlagSetFromMessageMetadata(metadata),
				}

				if v, ok := account[mboxName]; ok {
					account[mboxName] = append(v, mboxMessage)
				} else {
					account[mboxName] = []DiagMailboxMessage{mboxMessage}
				}
			}
		}
		return result, nil
	}, user.apiAddrsLock, user.apiLabelsLock)
}
|
||||
|
||||
// GetDiagnosticMetadata fetches the full list of message IDs for the user and
// their metadata (in chunks of 100 IDs per request), together with the set of
// message IDs recorded in the vault as having failed to sync.
func (user *User) GetDiagnosticMetadata(ctx context.Context) (DiagnosticMetadata, error) {
	failedMessages := xmaps.SetFromSlice(user.vault.SyncStatus().FailedMessageIDs)

	messageIDs, err := user.client.GetMessageIDs(ctx, "")
	if err != nil {
		return DiagnosticMetadata{}, err
	}

	meta := make([]proton.MessageMetadata, 0, len(messageIDs))

	// Request metadata in chunks of 100 IDs to keep each API call bounded.
	for _, m := range xslices.Chunk(messageIDs, 100) {
		metadata, err := user.client.GetMessageMetadataPage(ctx, 0, len(m), proton.MessageFilter{ID: m})
		if err != nil {
			return DiagnosticMetadata{}, err
		}

		meta = append(meta, metadata...)
	}

	return DiagnosticMetadata{
		MessageIDs:       messageIDs,
		Metadata:         meta,
		FailedMessageIDs: failedMessages,
	}, nil
}
|
||||
|
||||
// DebugDownloadMessages downloads each of the given messages into its own
// subdirectory of path (named after the message ID), writing the metadata as
// JSON plus the decrypted body and attachments (encrypted fallbacks are kept
// when decryption fails). progressCB, if non-nil, is invoked before each
// message with (userID, currentIndex, total). The user's API data is read
// under the address and user read locks for the whole download.
func (user *User) DebugDownloadMessages(
	ctx context.Context,
	path string,
	msgs map[string]DiagMailboxMessage,
	progressCB func(string, int, int),
) error {
	var err error
	safe.RLock(func() {
		// Wrap the body in a closure so early returns still release the locks.
		err = func() error {
			total := len(msgs)
			userID := user.ID()

			counter := 1
			for _, msg := range msgs {
				if progressCB != nil {
					progressCB(userID, counter, total)
					counter++
				}

				msgDir := filepath.Join(path, msg.ID)
				if err := os.MkdirAll(msgDir, 0o700); err != nil {
					return fmt.Errorf("failed to create directory '%v':%w", msgDir, err)
				}

				message, err := user.client.GetFullMessage(ctx, msg.ID, newProtonAPIScheduler(user.panicHandler), proton.NewDefaultAttachmentAllocator())
				if err != nil {
					return fmt.Errorf("failed to download message '%v':%w", msg.ID, err)
				}

				if err := writeMetadata(msgDir, message.Message); err != nil {
					return err
				}

				// Decrypt with the keyring of the message's address; the decode
				// strategy depends on whether the message has attachments.
				if err := withAddrKR(user.apiUser, user.apiAddrs[msg.AddressID], user.vault.KeyPass(), func(_, addrKR *crypto.KeyRing) error {
					switch {
					case len(message.Attachments) > 0:
						return decodeMultipartMessage(msgDir, addrKR, message.Message, message.AttData)

					case message.MIMEType == "multipart/mixed":
						return decodePGPMessage(msgDir, addrKR, message.Message)

					default:
						return decodeSimpleMessage(msgDir, addrKR, message.Message)
					}
				}); err != nil {
					return err
				}
			}
			return nil
		}()
	}, user.apiAddrsLock, user.apiUserLock)
	return err
}
|
||||
|
||||
// getBodyName returns the location of the decrypted message body inside path.
func getBodyName(path string) string {
	return filepath.Join(path, "body.txt")
}

// getBodyNameFailed returns the location used for a body that failed to decrypt.
func getBodyNameFailed(path string) string {
	return filepath.Join(path, "body_failed.txt")
}

// getBodyNamePGP returns the location used for a still-encrypted PGP body.
func getBodyNamePGP(path string) string {
	return filepath.Join(path, "body.pgp")
}

// getMetadataPath returns the location of the message metadata JSON inside path.
func getMetadataPath(path string) string {
	return filepath.Join(path, "metadata.json")
}

// getAttachmentPathSuccess returns the location of a decrypted attachment.
func getAttachmentPathSuccess(path, id, name string) string {
	return filepath.Join(path, "attachment_"+id+"_"+name)
}

// getAttachmentPathFailure returns the location used for an attachment that
// failed to decrypt.
func getAttachmentPathFailure(path, id string) string {
	return filepath.Join(path, "attachment_"+id+"_failed.pgp")
}
|
||||
|
||||
func decodeMultipartMessage(outPath string, kr *crypto.KeyRing, msg proton.Message, attData [][]byte) error {
|
||||
for idx, attachment := range msg.Attachments {
|
||||
if err := decodeAttachment(outPath, kr, attachment, attData[idx]); err != nil {
|
||||
return fmt.Errorf("failed to decode attachment %v of message %v: %w", attachment.ID, msg.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
return decodeSimpleMessage(outPath, kr, msg)
|
||||
}
|
||||
|
||||
func decodePGPMessage(outPath string, kr *crypto.KeyRing, msg proton.Message) error {
|
||||
var decrypted bytes.Buffer
|
||||
decrypted.Grow(len(msg.Body))
|
||||
|
||||
if err := msg.DecryptInto(kr, &decrypted); err != nil {
|
||||
logrus.Warnf("Failed to decrypt pgp message %v, storing as is: %v", msg.ID, err)
|
||||
bodyPath := getBodyNamePGP(outPath)
|
||||
if err := os.WriteFile(bodyPath, []byte(msg.Body), 0o600); err != nil {
|
||||
return fmt.Errorf("failed to write pgp body to '%v': %w", bodyPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
bodyPath := getBodyName(outPath)
|
||||
|
||||
if err := os.WriteFile(bodyPath, decrypted.Bytes(), 0o600); err != nil {
|
||||
return fmt.Errorf("failed to write pgp body to '%v': %w", bodyPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeSimpleMessage(outPath string, kr *crypto.KeyRing, msg proton.Message) error {
|
||||
var decrypted bytes.Buffer
|
||||
decrypted.Grow(len(msg.Body))
|
||||
|
||||
if err := msg.DecryptInto(kr, &decrypted); err != nil {
|
||||
logrus.Warnf("Failed to decrypt simple message %v, will try again as attachment : %v", msg.ID, err)
|
||||
return writeCustomTextPart(getBodyNameFailed(outPath), msg, err)
|
||||
}
|
||||
|
||||
bodyPath := getBodyName(outPath)
|
||||
|
||||
if err := os.WriteFile(bodyPath, decrypted.Bytes(), 0o600); err != nil {
|
||||
return fmt.Errorf("failed to write simple body to '%v': %w", bodyPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func writeMetadata(outPath string, msg proton.Message) error {
|
||||
type CustomMetadata struct {
|
||||
proton.MessageMetadata
|
||||
Header string
|
||||
ParsedHeaders proton.Headers
|
||||
MIMEType rfc822.MIMEType
|
||||
Attachments []proton.Attachment
|
||||
}
|
||||
|
||||
metadata := CustomMetadata{
|
||||
MessageMetadata: msg.MessageMetadata,
|
||||
Header: msg.Header,
|
||||
ParsedHeaders: msg.ParsedHeaders,
|
||||
MIMEType: msg.MIMEType,
|
||||
Attachments: msg.Attachments,
|
||||
}
|
||||
|
||||
j, err := json.MarshalIndent(metadata, "", " ")
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to encode json for message %v: %w", msg.ID, err)
|
||||
}
|
||||
|
||||
metaPath := getMetadataPath(outPath)
|
||||
|
||||
if err := os.WriteFile(metaPath, j, 0o600); err != nil {
|
||||
return fmt.Errorf("failed to write metadata to '%v': %w", metaPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeAttachment(outPath string, kr *crypto.KeyRing,
|
||||
att proton.Attachment,
|
||||
attData []byte) error {
|
||||
kps, err := base64.StdEncoding.DecodeString(att.KeyPackets)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Use io.Multi
|
||||
attachmentReader := io.MultiReader(bytes.NewReader(kps), bytes.NewReader(attData))
|
||||
|
||||
stream, err := kr.DecryptStream(attachmentReader, nil, crypto.GetUnixTime())
|
||||
if err != nil {
|
||||
logrus.
|
||||
WithField("attID", att.ID).
|
||||
WithError(err).
|
||||
Warn("Attachment decryption failed - construct")
|
||||
|
||||
var pgpMessageBuffer bytes.Buffer
|
||||
pgpMessageBuffer.Grow(len(kps) + len(attData))
|
||||
pgpMessageBuffer.Write(kps)
|
||||
pgpMessageBuffer.Write(attData)
|
||||
|
||||
return writeCustomAttachmentPart(getAttachmentPathFailure(outPath, att.ID), att, &crypto.PGPMessage{Data: pgpMessageBuffer.Bytes()}, err)
|
||||
}
|
||||
|
||||
var decryptBuffer bytes.Buffer
|
||||
decryptBuffer.Grow(len(kps) + len(attData))
|
||||
|
||||
if _, err := decryptBuffer.ReadFrom(stream); err != nil {
|
||||
logrus.
|
||||
WithField("attID", att.ID).
|
||||
WithError(err).
|
||||
Warn("Attachment decryption failed - stream")
|
||||
|
||||
var pgpMessageBuffer bytes.Buffer
|
||||
pgpMessageBuffer.Grow(len(kps) + len(attData))
|
||||
pgpMessageBuffer.Write(kps)
|
||||
pgpMessageBuffer.Write(attData)
|
||||
|
||||
return writeCustomAttachmentPart(getAttachmentPathFailure(outPath, att.ID), att, &crypto.PGPMessage{Data: pgpMessageBuffer.Bytes()}, err)
|
||||
}
|
||||
|
||||
attachmentPath := getAttachmentPathSuccess(outPath, att.ID, att.Name)
|
||||
|
||||
if err := os.WriteFile(attachmentPath, decryptBuffer.Bytes(), 0o600); err != nil {
|
||||
return fmt.Errorf("failed to write attachment %v to '%v': %w", att.ID, attachmentPath, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// writeCustomTextPart stores the armored, still-encrypted body of msg at
// outPath, embedding a custom armor header that explains why it could not be
// decrypted.
func writeCustomTextPart(
	outPath string,
	msg proton.Message,
	decError error,
) error {
	// Re-parse the armored body so it can be re-armored with custom headers.
	enc, err := crypto.NewPGPMessageFromArmored(msg.Body)
	if err != nil {
		return err
	}

	arm, err := enc.GetArmoredWithCustomHeaders(
		fmt.Sprintf("This message could not be decrypted: %v", decError),
		constants.ArmorHeaderVersion,
	)
	if err != nil {
		return err
	}

	if err := os.WriteFile(outPath, []byte(arm), 0o600); err != nil {
		return fmt.Errorf("failed to write custom message %v data to '%v': %w", msg.ID, outPath, err)
	}

	return nil
}
|
||||
|
||||
// writeCustomAttachmentPart writes an armored-PGP data part for an attachment that couldn't be decrypted.
// The armored output carries a custom header with the decryption error.
func writeCustomAttachmentPart(
	outPath string,
	att proton.Attachment,
	msg *crypto.PGPMessage,
	decError error,
) error {
	arm, err := msg.GetArmoredWithCustomHeaders(
		fmt.Sprintf("This attachment could not be decrypted: %v", decError),
		constants.ArmorHeaderVersion,
	)
	if err != nil {
		return err
	}

	filename := mime.QEncoding.Encode("utf-8", att.Name+".pgp")

	// NOTE(review): hdr is populated below but never serialized — only `arm`
	// is written to outPath, so this header construction currently has no
	// effect. Looks like leftover or unfinished code; confirm whether the MIME
	// header was meant to be written alongside the armored data.
	var hdr message.Header

	hdr.SetContentType("application/octet-stream", map[string]string{"name": filename})
	hdr.SetContentDisposition(string(att.Disposition), map[string]string{"filename": filename})

	if err := os.WriteFile(outPath, []byte(arm), 0o600); err != nil {
		return fmt.Errorf("failed to write custom attachment %v part to '%v': %w", att.ID, outPath, err)
	}

	return nil
}
|
|
@ -514,9 +514,9 @@ func (user *User) handleMessageEvents(ctx context.Context, messageEvents []proto
|
|||
case proton.EventUpdate, proton.EventUpdateFlags:
|
||||
// Draft update means to completely remove old message and upload the new data again, but we should
|
||||
// only do this if the event is of type EventUpdate otherwise label switch operations will not work.
|
||||
if event.Message.IsDraft() && event.Action == proton.EventUpdate {
|
||||
updates, err := user.handleUpdateDraftEvent(
|
||||
logging.WithLogrusField(ctx, "action", "update draft"),
|
||||
if (event.Message.IsDraft() || (event.Message.Flags&proton.MessageFlagSent != 0)) && event.Action == proton.EventUpdate {
|
||||
updates, err := user.handleUpdateDraftOrSentMessage(
|
||||
logging.WithLogrusField(ctx, "action", "update draft or sent message"),
|
||||
event,
|
||||
)
|
||||
if err != nil {
|
||||
|
@ -648,23 +648,7 @@ func (user *User) handleUpdateMessageEvent(_ context.Context, message proton.Mes
|
|||
"subject": logging.Sensitive(message.Subject),
|
||||
}).Info("Handling message updated event")
|
||||
|
||||
flags := imap.NewFlagSet()
|
||||
|
||||
if message.Seen() {
|
||||
flags.AddToSelf(imap.FlagSeen)
|
||||
}
|
||||
|
||||
if message.Starred() {
|
||||
flags.AddToSelf(imap.FlagFlagged)
|
||||
}
|
||||
|
||||
if message.IsDraft() {
|
||||
flags.AddToSelf(imap.FlagDraft)
|
||||
}
|
||||
|
||||
if message.IsRepliedAll == true || message.IsReplied == true { //nolint: gosimple
|
||||
flags.AddToSelf(imap.FlagAnswered)
|
||||
}
|
||||
flags := buildFlagSetFromMessageMetadata(message)
|
||||
|
||||
update := imap.NewMessageMailboxesUpdated(
|
||||
imap.MessageID(message.ID),
|
||||
|
@ -701,18 +685,19 @@ func (user *User) handleDeleteMessageEvent(_ context.Context, event proton.Messa
|
|||
}, user.updateChLock)
|
||||
}
|
||||
|
||||
func (user *User) handleUpdateDraftEvent(ctx context.Context, event proton.MessageEvent) ([]imap.Update, error) {
|
||||
func (user *User) handleUpdateDraftOrSentMessage(ctx context.Context, event proton.MessageEvent) ([]imap.Update, error) {
|
||||
return safe.RLockRetErr(func() ([]imap.Update, error) {
|
||||
user.log.WithFields(logrus.Fields{
|
||||
"messageID": event.ID,
|
||||
"subject": logging.Sensitive(event.Message.Subject),
|
||||
}).Info("Handling draft updated event")
|
||||
"isDraft": event.Message.IsDraft(),
|
||||
}).Info("Handling draft or sent updated event")
|
||||
|
||||
full, err := user.client.GetFullMessage(ctx, event.Message.ID, newProtonAPIScheduler(user.panicHandler), proton.NewDefaultAttachmentAllocator())
|
||||
if err != nil {
|
||||
// If the message is not found, it means that it has been deleted before we could fetch it.
|
||||
if apiErr := new(proton.APIError); errors.As(err, &apiErr) && apiErr.Status == http.StatusUnprocessableEntity {
|
||||
user.log.WithField("messageID", event.Message.ID).Warn("Cannot update draft: full message is missing on API")
|
||||
user.log.WithField("messageID", event.Message.ID).Warn("Cannot update message: full message is missing on API")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -642,19 +642,7 @@ func (conn *imapConnector) importMessage(
|
|||
}
|
||||
|
||||
func toIMAPMessage(message proton.MessageMetadata) imap.Message {
|
||||
flags := imap.NewFlagSet()
|
||||
|
||||
if !message.Unread {
|
||||
flags = flags.Add(imap.FlagSeen)
|
||||
}
|
||||
|
||||
if slices.Contains(message.LabelIDs, proton.StarredLabel) {
|
||||
flags = flags.Add(imap.FlagFlagged)
|
||||
}
|
||||
|
||||
if slices.Contains(message.LabelIDs, proton.DraftsLabel) {
|
||||
flags = flags.Add(imap.FlagDraft)
|
||||
}
|
||||
flags := buildFlagSetFromMessageMetadata(message)
|
||||
|
||||
var date time.Time
|
||||
|
||||
|
@ -747,3 +735,25 @@ func toIMAPMailbox(label proton.Label, flags, permFlags, attrs imap.FlagSet) ima
|
|||
func isAllMailOrScheduled(mailboxID imap.MailboxID) bool {
|
||||
return (mailboxID == proton.AllMailLabel) || (mailboxID == proton.AllScheduledLabel)
|
||||
}
|
||||
|
||||
func buildFlagSetFromMessageMetadata(message proton.MessageMetadata) imap.FlagSet {
|
||||
flags := imap.NewFlagSet()
|
||||
|
||||
if message.Seen() {
|
||||
flags.AddToSelf(imap.FlagSeen)
|
||||
}
|
||||
|
||||
if message.Starred() {
|
||||
flags.AddToSelf(imap.FlagFlagged)
|
||||
}
|
||||
|
||||
if message.IsDraft() {
|
||||
flags.AddToSelf(imap.FlagDraft)
|
||||
}
|
||||
|
||||
if message.IsRepliedAll == true || message.IsReplied == true { //nolint: gosimple
|
||||
flags.AddToSelf(imap.FlagAnswered)
|
||||
}
|
||||
|
||||
return flags
|
||||
}
|
||||
|
|
|
@ -21,6 +21,7 @@ import (
|
|||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -139,6 +140,8 @@ func (user *User) sync(ctx context.Context) error {
|
|||
return fmt.Errorf("failed to get message IDs to sync: %w", err)
|
||||
}
|
||||
|
||||
logrus.Debugf("User has the following failed synced message ids: %v", user.vault.SyncStatus().FailedMessageIDs)
|
||||
|
||||
// Remove any messages that have already failed to sync.
|
||||
messageIDs = xslices.Filter(messageIDs, func(messageID string) bool {
|
||||
return !slices.Contains(user.vault.SyncStatus().FailedMessageIDs, messageID)
|
||||
|
@ -241,6 +244,46 @@ func toMB(v uint64) float64 {
|
|||
return float64(v) / float64(Megabyte)
|
||||
}
|
||||
|
||||
// syncLimits gathers the memory and parallelism budgets used to throttle a
// user's mailbox sync (see newSyncLimits for how they are derived).
type syncLimits struct {
	// MaxDownloadRequestMem caps the memory used per download stage.
	MaxDownloadRequestMem uint64
	// MinDownloadRequestMem is the floor below which downloads may fail.
	MinDownloadRequestMem uint64
	// MaxMessageBuildingMem caps the memory used while building messages.
	MaxMessageBuildingMem uint64
	// MinMessageBuildingMem is the floor for the message-building stage.
	MinMessageBuildingMem uint64
	// MaxSyncMemory is the overall memory budget for the sync.
	MaxSyncMemory uint64
	// MaxParallelDownloads bounds concurrent download requests.
	MaxParallelDownloads int
}
||||
|
||||
func newSyncLimits(maxSyncMemory uint64) syncLimits {
|
||||
limits := syncLimits{
|
||||
// There's no point in using more than 128MB of download data per stage, after that we reach a point of diminishing
|
||||
// returns as we can't keep the pipeline fed fast enough.
|
||||
MaxDownloadRequestMem: 128 * Megabyte,
|
||||
|
||||
// Any lower than this and we may fail to download messages.
|
||||
MinDownloadRequestMem: 40 * Megabyte,
|
||||
|
||||
// This value can be increased to your hearts content. The more system memory the user has, the more messages
|
||||
// we can build in parallel.
|
||||
MaxMessageBuildingMem: 128 * Megabyte,
|
||||
MinMessageBuildingMem: 64 * Megabyte,
|
||||
|
||||
// Maximum recommend value for parallel downloads by the API team.
|
||||
MaxParallelDownloads: 20,
|
||||
|
||||
MaxSyncMemory: maxSyncMemory,
|
||||
}
|
||||
|
||||
if _, ok := os.LookupEnv("BRIDGE_SYNC_FORCE_MINIMUM_SPEC"); ok {
|
||||
logrus.Warn("Sync specs forced to minimum")
|
||||
limits.MaxDownloadRequestMem = 50 * Megabyte
|
||||
limits.MaxMessageBuildingMem = 80 * Megabyte
|
||||
limits.MaxParallelDownloads = 2
|
||||
limits.MaxSyncMemory = 800 * Megabyte
|
||||
}
|
||||
|
||||
return limits
|
||||
}
|
||||
|
||||
// nolint:gocyclo
|
||||
func (user *User) syncMessages(
|
||||
ctx context.Context,
|
||||
|
@ -253,7 +296,7 @@ func (user *User) syncMessages(
|
|||
addrKRs map[string]*crypto.KeyRing,
|
||||
updateCh map[string]*async.QueuedChannel[imap.Update],
|
||||
eventCh *async.QueuedChannel[events.Event],
|
||||
maxSyncMemory uint64,
|
||||
cfgMaxSyncMemory uint64,
|
||||
) error {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
@ -276,59 +319,51 @@ func (user *User) syncMessages(
|
|||
// Expected mem usage for this whole process should be the sum of MaxMessageBuildingMem and MaxDownloadRequestMem
|
||||
// times x due to pipeline and all additional memory used by network requests and compression+io.
|
||||
|
||||
// There's no point in using more than 128MB of download data per stage, after that we reach a point of diminishing
|
||||
// returns as we can't keep the pipeline fed fast enough.
|
||||
const MaxDownloadRequestMem = 128 * Megabyte
|
||||
|
||||
// Any lower than this and we may fail to download messages.
|
||||
const MinDownloadRequestMem = 40 * Megabyte
|
||||
|
||||
// This value can be increased to your hearts content. The more system memory the user has, the more messages
|
||||
// we can build in parallel.
|
||||
const MaxMessageBuildingMem = 128 * Megabyte
|
||||
const MinMessageBuildingMem = 64 * Megabyte
|
||||
|
||||
// Maximum recommend value for parallel downloads by the API team.
|
||||
const maxParallelDownloads = 20
|
||||
|
||||
totalMemory := memory.TotalMemory()
|
||||
|
||||
if maxSyncMemory >= totalMemory/2 {
|
||||
syncLimits := newSyncLimits(cfgMaxSyncMemory)
|
||||
|
||||
if syncLimits.MaxSyncMemory >= totalMemory/2 {
|
||||
logrus.Warnf("Requested max sync memory of %v MB is greater than half of system memory (%v MB), forcing to half of system memory",
|
||||
maxSyncMemory, toMB(totalMemory/2))
|
||||
maxSyncMemory = totalMemory / 2
|
||||
toMB(syncLimits.MaxSyncMemory), toMB(totalMemory/2))
|
||||
syncLimits.MaxSyncMemory = totalMemory / 2
|
||||
}
|
||||
|
||||
if maxSyncMemory < 800*Megabyte {
|
||||
logrus.Warnf("Requested max sync memory of %v MB, but minimum recommended is 800 MB, forcing max syncMemory to 800MB", toMB(maxSyncMemory))
|
||||
maxSyncMemory = 800 * Megabyte
|
||||
if syncLimits.MaxSyncMemory < 800*Megabyte {
|
||||
logrus.Warnf("Requested max sync memory of %v MB, but minimum recommended is 800 MB, forcing max syncMemory to 800MB", toMB(syncLimits.MaxSyncMemory))
|
||||
syncLimits.MaxSyncMemory = 800 * Megabyte
|
||||
}
|
||||
|
||||
logrus.Debugf("Total System Memory: %v", toMB(totalMemory))
|
||||
|
||||
syncMaxDownloadRequestMem := MaxDownloadRequestMem
|
||||
syncMaxMessageBuildingMem := MaxMessageBuildingMem
|
||||
// Linter says it's not used. This is a lie.
|
||||
//nolint: staticcheck
|
||||
syncMaxDownloadRequestMem := syncLimits.MaxDownloadRequestMem
|
||||
|
||||
// Linter says it's not used. This is a lie.
|
||||
//nolint: staticcheck
|
||||
syncMaxMessageBuildingMem := syncLimits.MaxMessageBuildingMem
|
||||
|
||||
// If less than 2GB available try and limit max memory to 512 MB
|
||||
switch {
|
||||
case maxSyncMemory < 2*Gigabyte:
|
||||
if maxSyncMemory < 800*Megabyte {
|
||||
case syncLimits.MaxSyncMemory < 2*Gigabyte:
|
||||
if syncLimits.MaxSyncMemory < 800*Megabyte {
|
||||
logrus.Warnf("System has less than 800MB of memory, you may experience issues sycing large mailboxes")
|
||||
}
|
||||
syncMaxDownloadRequestMem = MinDownloadRequestMem
|
||||
syncMaxMessageBuildingMem = MinMessageBuildingMem
|
||||
case maxSyncMemory == 2*Gigabyte:
|
||||
syncMaxDownloadRequestMem = syncLimits.MinDownloadRequestMem
|
||||
syncMaxMessageBuildingMem = syncLimits.MinMessageBuildingMem
|
||||
case syncLimits.MaxSyncMemory == 2*Gigabyte:
|
||||
// Increasing the max download capacity has very little effect on sync speed. We could increase the download
|
||||
// memory but the user would see less sync notifications. A smaller value here leads to more frequent
|
||||
// updates. Additionally, most of ot sync time is spent in the message building.
|
||||
syncMaxDownloadRequestMem = MaxDownloadRequestMem
|
||||
syncMaxDownloadRequestMem = syncLimits.MaxDownloadRequestMem
|
||||
// Currently limited so that if a user has multiple accounts active it also doesn't cause excessive memory usage.
|
||||
syncMaxMessageBuildingMem = MaxMessageBuildingMem
|
||||
syncMaxMessageBuildingMem = syncLimits.MaxMessageBuildingMem
|
||||
default:
|
||||
// Divide by 8 as download stage and build stage will use aprox. 4x the specified memory.
|
||||
remainingMemory := (maxSyncMemory - 2*Gigabyte) / 8
|
||||
syncMaxDownloadRequestMem = MaxDownloadRequestMem + remainingMemory
|
||||
syncMaxMessageBuildingMem = MaxMessageBuildingMem + remainingMemory
|
||||
remainingMemory := (syncLimits.MaxSyncMemory - 2*Gigabyte) / 8
|
||||
syncMaxDownloadRequestMem = syncLimits.MaxDownloadRequestMem + remainingMemory
|
||||
syncMaxMessageBuildingMem = syncLimits.MaxMessageBuildingMem + remainingMemory
|
||||
}
|
||||
|
||||
logrus.Debugf("Max memory usage for sync Download=%vMB Building=%vMB Predicted Max Total=%vMB",
|
||||
|
@ -367,7 +402,7 @@ func (user *User) syncMessages(
|
|||
|
||||
flushUpdateCh := make(chan flushUpdate)
|
||||
|
||||
errorCh := make(chan error, maxParallelDownloads*4)
|
||||
errorCh := make(chan error, syncLimits.MaxParallelDownloads*4)
|
||||
|
||||
// Go routine in charge of downloading message metadata
|
||||
async.GoAnnotated(ctx, user.panicHandler, func(ctx context.Context) {
|
||||
|
@ -441,7 +476,7 @@ func (user *User) syncMessages(
|
|||
logrus.Debugf("sync downloader exit")
|
||||
}()
|
||||
|
||||
attachmentDownloader := user.newAttachmentDownloader(ctx, client, maxParallelDownloads)
|
||||
attachmentDownloader := user.newAttachmentDownloader(ctx, client, syncLimits.MaxParallelDownloads)
|
||||
defer attachmentDownloader.close()
|
||||
|
||||
for request := range downloadCh {
|
||||
|
@ -456,7 +491,7 @@ func (user *User) syncMessages(
|
|||
return
|
||||
}
|
||||
|
||||
result, err := parallel.MapContext(ctx, maxParallelDownloads, request.ids, func(ctx context.Context, id string) (proton.FullMessage, error) {
|
||||
result, err := parallel.MapContext(ctx, syncLimits.MaxParallelDownloads, request.ids, func(ctx context.Context, id string) (proton.FullMessage, error) {
|
||||
defer async.HandlePanic(user.panicHandler)
|
||||
|
||||
var result proton.FullMessage
|
||||
|
|
|
@ -26,6 +26,7 @@ import (
|
|||
"github.com/ProtonMail/proton-bridge/v3/internal/updater"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/useragent"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/vault"
|
||||
"github.com/ProtonMail/proton-bridge/v3/pkg/ports"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -34,7 +35,7 @@ func TestVault_Settings_IMAP(t *testing.T) {
|
|||
s := newVault(t)
|
||||
|
||||
// Check the default IMAP port and SSL setting.
|
||||
require.Equal(t, 1143, s.GetIMAPPort())
|
||||
require.Equal(t, ports.FindFreePortFrom(1143), s.GetIMAPPort())
|
||||
require.Equal(t, false, s.GetIMAPSSL())
|
||||
|
||||
// Modify the IMAP port and SSL setting.
|
||||
|
@ -51,7 +52,7 @@ func TestVault_Settings_SMTP(t *testing.T) {
|
|||
s := newVault(t)
|
||||
|
||||
// Check the default SMTP port and SSL setting.
|
||||
require.Equal(t, 1025, s.GetSMTPPort())
|
||||
require.Equal(t, ports.FindFreePortFrom(1025), s.GetSMTPPort())
|
||||
require.Equal(t, false, s.GetSMTPSSL())
|
||||
|
||||
// Modify the SMTP port and SSL setting.
|
||||
|
|
|
@ -24,6 +24,7 @@ import (
|
|||
|
||||
"github.com/ProtonMail/gluon/async"
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/vault"
|
||||
"github.com/ProtonMail/proton-bridge/v3/pkg/ports"
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
|
@ -93,8 +94,8 @@ func TestVault_Reset(t *testing.T) {
|
|||
require.NoError(t, s.Reset(s.GetGluonCacheDir()))
|
||||
|
||||
// The data is gone.
|
||||
require.Equal(t, 1143, s.GetIMAPPort())
|
||||
require.Equal(t, 1025, s.GetSMTPPort())
|
||||
require.Equal(t, ports.FindFreePortFrom(1143), s.GetIMAPPort())
|
||||
require.Equal(t, ports.FindFreePortFrom(1025), s.GetSMTPPort())
|
||||
}
|
||||
|
||||
func newVault(t *testing.T) *vault.Vault {
|
||||
|
|
|
@ -30,6 +30,7 @@ type Config struct {
|
|||
DisplayName string
|
||||
Identifier string
|
||||
Organization string
|
||||
AccountName string
|
||||
AccountDescription string
|
||||
|
||||
IMAP *IMAP
|
||||
|
|
|
@ -24,6 +24,10 @@ const mailTemplate = `<?xml version="1.0" encoding="UTF-8"?>
|
|||
<key>PayloadContent</key>
|
||||
<array>
|
||||
<dict>
|
||||
{{- if .AccountName}}
|
||||
<key>EmailAccountName</key>
|
||||
<string>{{.AccountName}}</string>
|
||||
{{- end}}
|
||||
{{- if .AccountDescription}}
|
||||
<key>EmailAccountDescription</key>
|
||||
<string>{{.AccountDescription}}</string>
|
||||
|
|
|
@ -77,7 +77,7 @@ func (restarter *Restarter) Restart() {
|
|||
//nolint:gosec
|
||||
cmd := execabs.Command(
|
||||
restarter.exe,
|
||||
xslices.Join(removeFlagWithValue(removeFlag(os.Args[1:], "no-window"), "parent-pid"), restarter.flags)...,
|
||||
xslices.Join(removeFlagsWithValue(removeFlag(os.Args[1:], "no-window"), "parent-pid", "session-id"), restarter.flags)...,
|
||||
)
|
||||
|
||||
l := logrus.WithFields(logrus.Fields{
|
||||
|
@ -157,6 +157,16 @@ func removeFlagWithValue(argList []string, flag string) []string {
|
|||
return result
|
||||
}
|
||||
|
||||
// removeFlagWithValue removes flags that require a value from a list of command line parameters.
|
||||
// The flags can be of the following form `-flag value`, `--flag value`, `-flag=value` or `--flags=value`.
|
||||
func removeFlagsWithValue(argList []string, flags ...string) []string {
|
||||
for _, flag := range flags {
|
||||
argList = removeFlagWithValue(argList, flag)
|
||||
}
|
||||
|
||||
return argList
|
||||
}
|
||||
|
||||
func removeFlag(argList []string, flag string) []string {
|
||||
return xslices.Filter(argList, func(arg string) bool { return (arg != "-"+flag) && (arg != "--"+flag) })
|
||||
}
|
||||
|
|
|
@ -42,6 +42,23 @@ func TestRemoveFlagWithValue(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestRemoveFlagsWithValue(t *testing.T) {
|
||||
tests := []struct {
|
||||
argList []string
|
||||
flags []string
|
||||
expected []string
|
||||
}{
|
||||
{[]string{}, []string{"a", "b"}, nil},
|
||||
{[]string{"-a", "-b=value", "-c"}, []string{"b"}, []string{"-a", "-c"}},
|
||||
{[]string{"-a", "--b=value", "-c"}, []string{"b", "c"}, []string{"-a"}},
|
||||
{[]string{"-a", "-b", "value", "-c"}, []string{"c", "b"}, []string{"-a"}},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
require.Equal(t, removeFlagsWithValue(tt.argList, tt.flags...), tt.expected)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoveFlag(t *testing.T) {
|
||||
tests := []struct {
|
||||
argList []string
|
||||
|
|
|
@ -216,8 +216,16 @@ func TestFeatures(testingT *testing.T) {
|
|||
ctx.Step(`^IMAP client "([^"]*)" marks the message with subject "([^"]*)" as deleted$`, s.imapClientMarksTheMessageWithSubjectAsDeleted)
|
||||
ctx.Step(`^IMAP client "([^"]*)" marks message (\d+) as not deleted$`, s.imapClientMarksMessageAsNotDeleted)
|
||||
ctx.Step(`^IMAP client "([^"]*)" marks all messages as deleted$`, s.imapClientMarksAllMessagesAsDeleted)
|
||||
ctx.Step(`^IMAP client "([^"]*)" sees that message (\d+) has the flag "([^"]*)"$`, s.imapClientSeesThatMessageHasTheFlag)
|
||||
ctx.Step(`^IMAP client "([^"]*)" expunges$`, s.imapClientExpunges)
|
||||
ctx.Step(`^IMAP client "([^"]*)" marks message (\d+) as "([^"]*)"$`, s.imapClientMarksMessageAsState)
|
||||
ctx.Step(`^IMAP client "([^"]*)" marks the message with subject "([^"]*)" as "([^"]*)"$`, s.imapClientMarksTheMessageWithSubjectAsState)
|
||||
ctx.Step(`^IMAP client "([^"]*)" marks all messages as "([^"]*)"$`, s.imapClientMarksAllMessagesAsState)
|
||||
ctx.Step(`^IMAP client "([^"]*)" eventually sees that message at row (\d+) has the flag "([^"]*)"$`, s.imapClientSeesThatMessageHasTheFlag)
|
||||
ctx.Step(`^IMAP client "([^"]*)" eventually sees that message at row (\d+) does not have the flag "([^"]*)"$`, s.imapClientSeesThatMessageDoesNotHaveTheFlag)
|
||||
ctx.Step(`^IMAP client "([^"]*)" eventually sees that the message with subject "([^"]*)" has the flag "([^"]*)"`, s.imapClientSeesThatTheMessageWithSubjectHasTheFlag)
|
||||
ctx.Step(`^IMAP client "([^"]*)" eventually sees that the message with subject "([^"]*)" does not have the flag "([^"]*)"`, s.imapClientSeesThatTheMessageWithSubjectDoesNotHaveTheFlag)
|
||||
ctx.Step(`^IMAP client "([^"]*)" eventually sees that all the messages have the flag "([^"]*)"`, s.imapClientSeesThatAllTheMessagesHaveTheFlag)
|
||||
ctx.Step(`^IMAP client "([^"]*)" eventually sees that all the messages do not have the flag "([^"]*)"`, s.imapClientSeesThatAllTheMessagesDoNotHaveTheFlag)
|
||||
ctx.Step(`^IMAP client "([^"]*)" appends the following message to "([^"]*)":$`, s.imapClientAppendsTheFollowingMessageToMailbox)
|
||||
ctx.Step(`^IMAP client "([^"]*)" appends the following messages to "([^"]*)":$`, s.imapClientAppendsTheFollowingMessagesToMailbox)
|
||||
ctx.Step(`^IMAP client "([^"]*)" appends "([^"]*)" to "([^"]*)"$`, s.imapClientAppendsToMailbox)
|
||||
|
@ -239,6 +247,7 @@ func TestFeatures(testingT *testing.T) {
|
|||
ctx.Step(`^SMTP client "([^"]*)" sends DATA:$`, s.smtpClientSendsData)
|
||||
ctx.Step(`^SMTP client "([^"]*)" sends RSET$`, s.smtpClientSendsReset)
|
||||
ctx.Step(`^SMTP client "([^"]*)" sends the following message from "([^"]*)" to "([^"]*)":$`, s.smtpClientSendsTheFollowingMessageFromTo)
|
||||
ctx.Step(`^SMTP client "([^"]*)" sends the following EML "([^"]*)" from "([^"]*)" to "([^"]*)"$`, s.smtpClientSendsTheFollowingEmlFromTo)
|
||||
ctx.Step(`^SMTP client "([^"]*)" logs out$`, s.smtpClientLogsOut)
|
||||
|
||||
// ==== TELEMETRY ====
|
||||
|
|
|
@ -18,7 +18,7 @@ Feature: IMAP remove messages from mailbox
|
|||
When IMAP client "1" selects "Folders/mbox"
|
||||
And IMAP client "1" marks message 2 as deleted
|
||||
And it succeeds
|
||||
Then IMAP client "1" sees that message 2 has the flag "\Deleted"
|
||||
Then IMAP client "1" eventually sees that message at row 2 has the flag "\Deleted"
|
||||
When IMAP client "1" expunges
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees 9 messages in "Folders/mbox"
|
||||
|
|
|
@ -59,3 +59,40 @@ Feature: IMAP Draft messages
|
|||
| This is a dra |
|
||||
And IMAP client "1" eventually sees 0 messages in "Drafts"
|
||||
|
||||
Scenario: Draft saved without "Date" header
|
||||
When IMAP client "1" selects "Drafts"
|
||||
And IMAP client "1" marks message 1 as deleted
|
||||
And IMAP client "1" expunges
|
||||
And it succeeds
|
||||
Then IMAP client "1" appends the following message to "Drafts":
|
||||
"""
|
||||
From: foo@bar.com
|
||||
Subject: Draft without Date
|
||||
Content-Type: text/plain
|
||||
To: someone@example.com
|
||||
|
||||
This is a Draft without Date in header
|
||||
"""
|
||||
And it succeeds
|
||||
And IMAP client "1" eventually sees the following messages in "Drafts":
|
||||
| to | subject | body |
|
||||
| someone@example.com | Draft without Date | This is a Draft without Date in header |
|
||||
|
||||
Scenario: Draft saved without "From" header
|
||||
When IMAP client "1" selects "Drafts"
|
||||
And IMAP client "1" marks message 1 as deleted
|
||||
And IMAP client "1" expunges
|
||||
And it succeeds
|
||||
Then IMAP client "1" appends the following message to "Drafts":
|
||||
"""
|
||||
Date: 01 Jan 1980 00:00:00 +0000
|
||||
Subject: Draft without From
|
||||
Content-Type: text/plain
|
||||
To: someone@example.com
|
||||
|
||||
This is a Draft without From in header
|
||||
"""
|
||||
And it succeeds
|
||||
And IMAP client "1" eventually sees the following messages in "Drafts":
|
||||
| to | subject | body |
|
||||
| someone@example.com | Draft without From | This is a Draft without From in header |
|
||||
|
|
|
@ -73,7 +73,7 @@ Feature: IMAP import messages
|
|||
|
||||
# The message is imported as UTF-8 and the content type is determined at build time.
|
||||
Scenario: Import message as latin1 without content type
|
||||
When IMAP client "1" appends "text_plain_unknown_latin1.eml" to "INBOX"
|
||||
When IMAP client "1" appends "plain/text_plain_unknown_latin1.eml" to "INBOX"
|
||||
Then it succeeds
|
||||
And IMAP client "1" eventually sees the following messages in "INBOX":
|
||||
| from | to | body |
|
||||
|
@ -81,7 +81,7 @@ Feature: IMAP import messages
|
|||
|
||||
# The message is imported and the body is converted to UTF-8.
|
||||
Scenario: Import message as latin1 with content type
|
||||
When IMAP client "1" appends "text_plain_latin1.eml" to "INBOX"
|
||||
When IMAP client "1" appends "plain/text_plain_latin1.eml" to "INBOX"
|
||||
Then it succeeds
|
||||
And IMAP client "1" eventually sees the following messages in "INBOX":
|
||||
| from | to | body |
|
||||
|
@ -89,7 +89,7 @@ Feature: IMAP import messages
|
|||
|
||||
# The message is imported anad the body is wrongly converted (body is corrupted).
|
||||
Scenario: Import message as latin1 with wrong content type
|
||||
When IMAP client "1" appends "text_plain_wrong_latin1.eml" to "INBOX"
|
||||
When IMAP client "1" appends "plain/text_plain_wrong_latin1.eml" to "INBOX"
|
||||
Then it succeeds
|
||||
And IMAP client "1" eventually sees the following messages in "INBOX":
|
||||
| from | to |
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
Feature: IMAP change state of message in mailbox
|
||||
Background:
|
||||
Given there exists an account with username "[user:user]" and password "password"
|
||||
And the account "[user:user]" has the following custom mailboxes:
|
||||
| name | type |
|
||||
| one | folder |
|
||||
| two | folder |
|
||||
And the address "[user:user]@[domain]" of account "[user:user]" has 5 messages in "Folders/one"
|
||||
And the address "[user:user]@[domain]" of account "[user:user]" has 150 messages in "Folders/two"
|
||||
And the address "[user:user]@[domain]" of account "[user:user]" has the following messages in "Inbox":
|
||||
| from | to | subject | unread |
|
||||
| a@example.com | b@example.com | one | true |
|
||||
| c@example.com | d@example.com | two | false |
|
||||
And bridge starts
|
||||
And the user logs in with username "[user:user]" and password "password"
|
||||
And user "[user:user]" finishes syncing
|
||||
And user "[user:user]" connects and authenticates IMAP client "1"
|
||||
|
||||
Scenario: Mark message as read
|
||||
When IMAP client "1" selects "Folders/one"
|
||||
And IMAP client "1" marks message 1 as "read"
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees that message at row 1 has the flag "\Seen"
|
||||
|
||||
Scenario: Mark message as unread
|
||||
When IMAP client "1" selects "Folders/one"
|
||||
And IMAP client "1" marks message 1 as "unread"
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees that message at row 1 does not have the flag "\Seen"
|
||||
|
||||
Scenario: Mark message as starred
|
||||
When IMAP client "1" selects "Folders/one"
|
||||
And IMAP client "1" marks message 1 as "starred"
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees that message at row 1 has the flag "\Flagged"
|
||||
|
||||
Scenario: Mark message as unstarred
|
||||
When IMAP client "1" selects "Folders/one"
|
||||
And IMAP client "1" marks message 1 as "unstarred"
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees that message at row 1 does not have the flag "\Flagged"
|
||||
|
||||
Scenario: Mark message with subject as read/unread
|
||||
When IMAP client "1" selects "Inbox"
|
||||
And IMAP client "1" marks the message with subject "one" as "read"
|
||||
And it succeeds
|
||||
And IMAP client "1" marks the message with subject "two" as "unread"
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees that the message with subject "one" has the flag "\Seen"
|
||||
And IMAP client "1" eventually sees that the message with subject "two" does not have the flag "\Seen"
|
||||
|
||||
Scenario: Mark all messages in folder as read/unread
|
||||
When IMAP client "1" selects "Folders/two"
|
||||
And IMAP client "1" marks all messages as "read"
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees that all the messages have the flag "\Seen"
|
||||
When IMAP client "1" marks all messages as "unread"
|
||||
And it succeeds
|
||||
Then IMAP client "1" eventually sees that all the messages do not have the flag "\Seen"
|
|
@ -343,3 +343,34 @@ Feature: SMTP sending of plain messages
|
|||
}
|
||||
}
|
||||
"""
|
||||
|
||||
Scenario: HTML message with Foreign/Nonascii chars in Subject and Body
|
||||
When there exists an account with username "bridgetest" and password "password"
|
||||
And the user logs in with username "bridgetest" and password "password"
|
||||
And user "bridgetest" connects and authenticates SMTP client "1"
|
||||
And SMTP client "1" sends the following EML "html/foreign_ascii_subject_body.eml" from "bridgetest@proton.local" to "pm.bridge.qa@gmail.com"
|
||||
Then it succeeds
|
||||
When user "bridgetest" connects and authenticates IMAP client "1"
|
||||
Then IMAP client "1" eventually sees the following messages in "Sent":
|
||||
| from | to | subject |
|
||||
| bridgetest@proton.local | pm.bridge.qa@gmail.com | Subjεέςτ ¶ Ä È |
|
||||
And the body in the "POST" request to "/mail/v4/messages" is:
|
||||
"""
|
||||
{
|
||||
"Message": {
|
||||
"Subject": "Subjεέςτ ¶ Ä È",
|
||||
"Sender": {
|
||||
"Name": "Bridge Test"
|
||||
},
|
||||
"ToList": [
|
||||
{
|
||||
"Address": "pm.bridge.qa@gmail.com",
|
||||
"Name": "External Bridge"
|
||||
}
|
||||
],
|
||||
"CCList": [],
|
||||
"BCCList": [],
|
||||
"MIMEType": "text/html"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
|
|
@ -193,3 +193,34 @@ Feature: SMTP sending of plain messages
|
|||
}
|
||||
}
|
||||
"""
|
||||
|
||||
Scenario: Basic message with multiple different attachments to internal account
|
||||
When there exists an account with username "bridgetest" and password "password"
|
||||
And the user logs in with username "bridgetest" and password "password"
|
||||
And user "bridgetest" connects and authenticates SMTP client "1"
|
||||
And SMTP client "1" sends the following EML "plain/text_plain_multiple_attachments.eml" from "bridgetest@proton.local" to "internalbridgetest@proton.local"
|
||||
Then it succeeds
|
||||
When user "bridgetest" connects and authenticates IMAP client "1"
|
||||
Then IMAP client "1" eventually sees the following messages in "Sent":
|
||||
| from | to | subject |
|
||||
| bridgetest@proton.local | internalbridgetest@proton.local | Plain with multiple different attachments |
|
||||
And the body in the "POST" request to "/mail/v4/messages" is:
|
||||
"""
|
||||
{
|
||||
"Message": {
|
||||
"Subject": "Plain with multiple different attachments",
|
||||
"Sender": {
|
||||
"Name": "Bridge Test"
|
||||
},
|
||||
"ToList": [
|
||||
{
|
||||
"Address": "internalbridgetest@proton.local",
|
||||
"Name": "Internal Bridge"
|
||||
}
|
||||
],
|
||||
"CCList": [],
|
||||
"BCCList": [],
|
||||
"MIMEType": "text/plain"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
|
|
@ -410,25 +410,88 @@ func (s *scenario) imapClientMarksAllMessagesAsDeleted(clientID string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientSeesThatMessageHasTheFlag(clientID string, seq int, flag string) error {
|
||||
func (s *scenario) imapClientMarksMessageAsState(clientID string, seq int, messageState string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
fetch, err := clientFetch(client, client.Mailbox().Name)
|
||||
err := clientChangeMessageState(client, seq, messageState, true)
|
||||
if err != nil {
|
||||
s.t.pushError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientMarksTheMessageWithSubjectAsState(clientID, subject, messageState string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
uid, err := clientGetUIDBySubject(client, client.Mailbox().Name, subject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
idx := xslices.IndexFunc(fetch, func(msg *imap.Message) bool {
|
||||
return msg.SeqNum == uint32(seq)
|
||||
})
|
||||
|
||||
if !slices.Contains(fetch[idx].Flags, flag) {
|
||||
return fmt.Errorf("expected message %v to have flag %v, got %v", seq, flag, fetch[idx].Flags)
|
||||
if err := clientChangeMessageState(client, int(uid), messageState, true); err != nil {
|
||||
s.t.pushError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientMarksAllMessagesAsState(clientID, messageState string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
if err := clientChangeAllMessageState(client, messageState); err != nil {
|
||||
s.t.pushError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientSeesThatMessageHasTheFlag(clientID string, seq int, flag string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
return clientIsFlagApplied(client, seq, flag, true, false)
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientSeesThatMessageDoesNotHaveTheFlag(clientID string, seq int, flag string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
return clientIsFlagApplied(client, seq, flag, false, false)
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientSeesThatTheMessageWithSubjectHasTheFlag(clientID, subject, flag string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
uid, err := clientGetUIDBySubject(client, client.Mailbox().Name, subject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return clientIsFlagApplied(client, int(uid), flag, true, false)
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientSeesThatTheMessageWithSubjectDoesNotHaveTheFlag(clientID, subject, flag string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
uid, err := clientGetUIDBySubject(client, client.Mailbox().Name, subject)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return clientIsFlagApplied(client, int(uid), flag, false, false)
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientSeesThatAllTheMessagesHaveTheFlag(clientID string, flag string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
return clientIsFlagApplied(client, 1, flag, true, true)
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientSeesThatAllTheMessagesDoNotHaveTheFlag(clientID string, flag string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
return clientIsFlagApplied(client, 1, flag, false, true)
|
||||
}
|
||||
|
||||
func (s *scenario) imapClientExpunges(clientID string) error {
|
||||
_, client := s.t.getIMAPClient(clientID)
|
||||
|
||||
|
@ -776,3 +839,78 @@ func clientStore(client *client.Client, from, to int, isUID bool, item imap.Stor
|
|||
func clientAppend(client *client.Client, mailbox string, literal string) error {
|
||||
return client.Append(mailbox, []string{}, time.Now(), strings.NewReader(literal))
|
||||
}
|
||||
|
||||
func clientIsFlagApplied(client *client.Client, seq int, flag string, applied bool, wholeMailbox bool) error {
|
||||
fetch, err := clientFetch(client, client.Mailbox().Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
idx := xslices.IndexFunc(fetch, func(msg *imap.Message) bool {
|
||||
return msg.SeqNum == uint32(seq)
|
||||
})
|
||||
|
||||
if slices.Contains(fetch[idx].Flags, flag) != applied {
|
||||
return fmt.Errorf("expected message %v to have flag %v set to %v, got %v", seq, flag, applied, fetch[idx].Flags)
|
||||
}
|
||||
|
||||
if wholeMailbox {
|
||||
for i := seq; i <= int(client.Mailbox().Messages); i++ {
|
||||
idx := xslices.IndexFunc(fetch, func(msg *imap.Message) bool {
|
||||
return msg.SeqNum == uint32(i)
|
||||
})
|
||||
|
||||
if slices.Contains(fetch[idx].Flags, flag) != applied {
|
||||
return fmt.Errorf("expected message %v to have flag %v set to %v, got %v", seq, flag, applied, fetch[idx].Flags)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func clientChangeMessageState(client *client.Client, seq int, messageState string, isUID bool) error {
|
||||
switch {
|
||||
case messageState == "read":
|
||||
_, err := clientStore(client, seq, seq, isUID, imap.FormatFlagsOp(imap.AddFlags, true), imap.SeenFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case messageState == "unread":
|
||||
_, err := clientStore(client, seq, seq, isUID, imap.FormatFlagsOp(imap.RemoveFlags, true), imap.SeenFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case messageState == "starred":
|
||||
_, err := clientStore(client, seq, seq, isUID, imap.FormatFlagsOp(imap.AddFlags, true), imap.FlaggedFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case messageState == "unstarred":
|
||||
_, err := clientStore(client, seq, seq, isUID, imap.FormatFlagsOp(imap.RemoveFlags, true), imap.FlaggedFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func clientChangeAllMessageState(client *client.Client, messageState string) error {
|
||||
if messageState == "read" {
|
||||
_, err := clientStore(client, 1, int(client.Mailbox().Messages), false, imap.FormatFlagsOp(imap.AddFlags, true), imap.SeenFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else if messageState == "unread" {
|
||||
_, err := clientStore(client, 1, int(client.Mailbox().Messages), false, imap.FormatFlagsOp(imap.RemoveFlags, true), imap.SeenFlag)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -20,6 +20,8 @@ package tests
|
|||
import (
|
||||
"fmt"
|
||||
"net/smtp"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/ProtonMail/proton-bridge/v3/internal/constants"
|
||||
|
@ -132,28 +134,24 @@ func (s *scenario) smtpClientSendsReset(clientID string) error {
|
|||
func (s *scenario) smtpClientSendsTheFollowingMessageFromTo(clientID, from, to string, message *godog.DocString) error {
|
||||
_, client := s.t.getSMTPClient(clientID)
|
||||
|
||||
s.t.pushError(func() error {
|
||||
if err := client.Mail(from); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := clientSend(client, from, to, message.Content); err != nil {
|
||||
s.t.pushError(err)
|
||||
}
|
||||
|
||||
for _, to := range strings.Split(to, ", ") {
|
||||
if err := client.Rcpt(to); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
wc, err := client.Data()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
func (s *scenario) smtpClientSendsTheFollowingEmlFromTo(clientID, file, from, to string) error {
|
||||
_, client := s.t.getSMTPClient(clientID)
|
||||
|
||||
if _, err := wc.Write([]byte(message.Content)); err != nil {
|
||||
return err
|
||||
}
|
||||
b, err := os.ReadFile(filepath.Join("testdata", file))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return wc.Close()
|
||||
}())
|
||||
if err := clientSend(client, from, to, string(b)); err != nil {
|
||||
s.t.pushError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -165,3 +163,26 @@ func (s *scenario) smtpClientLogsOut(clientID string) error {
|
|||
|
||||
return nil
|
||||
}
|
||||
|
||||
func clientSend(client *smtp.Client, from, to, message string) error {
|
||||
if err := client.Mail(from); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for _, to := range strings.Split(to, ", ") {
|
||||
if err := client.Rcpt(to); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
wc, err := client.Data()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if _, err := wc.Write([]byte(message)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return wc.Close()
|
||||
}
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
From: Bridge Test <bridgetest@proton.local>
|
||||
To: External Bridge <pm.bridge.qa@gmail.com>
|
||||
Subject: =?UTF-8?B?U3Vias61zq3Pgs+EIMK2IMOEIMOI?=
|
||||
Content-Type: text/html; charset=UTF-8
|
||||
Content-Transfer-Encoding: 8bit
|
||||
|
||||
<html>
|
||||
<head>
|
||||
|
||||
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
|
||||
</head>
|
||||
<body>
|
||||
Subjεέςτ ¶ Ä È
|
||||
</body>
|
||||
</html>
|
|
@ -0,0 +1,69 @@
|
|||
From: Bridge Test <bridgetest@proton.local>
|
||||
To: Internal Bridge <internalbridgetest@proton.local>
|
||||
Subject: Plain with multiple different attachments
|
||||
Content-Type: multipart/mixed; boundary="bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606"
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: text/plain; charset=UTF-8; format=flowed
|
||||
Content-Transfer-Encoding: 7bit
|
||||
|
||||
Body of plain text message with multiple attachments
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: application/zip; name="PINProtected.zip"
|
||||
Content-Disposition: attachment; filename="PINProtected.zip"
|
||||
Content-Transfer-Encoding: base64
|
||||
|
||||
UEsDBBQACAAIAHhlwVYAAAAAAAAAABADAAAMACAAbWVzc2FnZTIudHh0VVQNAAdkdnhk7nZ4
|
||||
AABQSwUGAAAAAAIAAgC/AAAAewMAAAAA
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: application/vnd.openxmlformats-officedocument.wordprocessingml.document; name="test.docx"
|
||||
Content-Disposition: attachment; filename="test.docx"
|
||||
Content-Transfer-Encoding: base64
|
||||
|
||||
UEsDBBQABgAIAAAAIQDfpNJsWgEAACAFAAATAAgCW0NvbnRlbnRfVHlwZXNdLnhtbCCiBAIo
|
||||
AAAAgB4AAHdvcmQvc3R5bGVzLnhtbFBLBQYAAAAACwALAMECAADXKQAAAAA=
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: application/pdf; name="test.pdf"
|
||||
Content-Disposition: attachment; filename="test.pdf"
|
||||
Content-Transfer-Encoding: base64
|
||||
|
||||
JVBERi0xLjUKJeLjz9MKNyAwIG9iago8PAovVHlwZSAvRm9udERlc2NyaXB0b3IKL0ZvbnRO
|
||||
MjM0NAolJUVPRgo=
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: application/vnd.openxmlformats-officedocument.spreadsheetml.sheet; name="test.xlsx"
|
||||
Content-Disposition: attachment; filename="test.xlsx"
|
||||
Content-Transfer-Encoding: base64
|
||||
|
||||
UEsDBBQABgAIAAAAIQBi7p1oXgEAAJAEAAATAAgCW0NvbnRlbnRfVHlwZXNdLnhtbCCiBAIo
|
||||
AAoACgCAAgAAexwAAAAA
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: text/xml; charset=UTF-8; name="testxml.xml"
|
||||
Content-Disposition: attachment; filename="testxml.xml"
|
||||
Content-Transfer-Encoding: base64
|
||||
|
||||
PD94bWwgdmVyc2lvbj0iMS4wIiBlbmNvZGluZz0iVVRGLTgiPz4KPCFET0NUWVBFIHN1aXRl
|
||||
VUtUZXN0Ii8+CiAgICAgICAgPC9jbGFzc2VzPgogICAgPC90ZXN0PgoKPC9zdWl0ZT4=
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: text/plain; charset=UTF-8; name="update.txt"
|
||||
Content-Disposition: attachment; filename="update.txt"
|
||||
Content-Transfer-Encoding: base64
|
||||
|
||||
DQpHb2NlQERFU0tUT1AtQ0dONkZENiBNSU5HVzY0IC9jL1Byb2dyYW0gRmlsZXMvUHJvdG9u
|
||||
NFdqRUw5WkplbnJZcUZucXVvSFBEa0w5VWZFeTA0VlBYRkViVERWLVlQaS1BSWc9PSINCg==
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606
|
||||
Content-Type: text/calendar; charset=UTF-8; name="=?UTF-8?B?6YCZ5piv5ryi5a2X55qE5LiA5YCL5L6L5a2QLmljcw==?="
|
||||
Content-Disposition: attachment; filename*0*=UTF-8''%E9%80%99%E6%98%AF%E6%BC%A2%E5%AD%97%E7%9A%84%E4%B8%80; filename*1*=%E5%80%8B%E4%BE%8B%E5%AD%90%2E%69%63%73
|
||||
Content-Transfer-Encoding: base64
|
||||
|
||||
QkVHSU46VkNBTEVOREFSCk1FVEhPRDpQVUJMSVNIClZFUlNJT046Mi4wClgtV1ItQ0FMTkFN
|
||||
RDpWQUxBUk0KRU5EOlZFVkVOVApFTkQ6VkNBTEVOREFSCg==
|
||||
|
||||
--bc5bd30245232f31b6c976adcd59bb0069c9b13f986f9e40c2571bb80aa16606--
|
||||
|
|
@ -0,0 +1,52 @@
|
|||
#!/usr/bin/env bash

# Copyright (c) 2023 Proton AG
#
# This file is part of Proton Mail Bridge.
#
# Proton Mail Bridge is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Proton Mail Bridge is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.


# This script calculates coverage for bridge project
#
# Output:
#   stdout       : total coverage (to be parsed by Gitlab pipeline)
#   coverage.xml : Cobertura format of covered lines for coverage visualization in Gitlab

# Assuming that test coverages from all jobs were put into `./coverage` folder
# and passed as artifacts. The flags are:
# -covermode=count
# -coverpkg=github.com/ProtonMail/proton-bridge/v3/internal/...,github.com/ProtonMail/proton-bridge/v3/pkg/...,
# -args -test.gocoverdir=$$PWD/coverage/${TOPIC}

# Fail fast and loudly: without these options a failure in any step below
# (covdata merge, filtering, conversion) would be silently ignored and the
# pipeline would publish stale or empty coverage numbers.
set -euo pipefail

# Comma-separated list: the top-level `coverage` dir plus every per-topic
# subdirectory produced by the test jobs.
ALLINPUTS="coverage$(printf ",%s" coverage/*)"

# Merge the binary coverage data into one textual profile.
go tool covdata textfmt \
    -i "$ALLINPUTS" \
    -o coverage_withGen.out

# Filter out auto-generated code
grep -v '\.pb\.go' coverage_withGen.out > coverage.out

# Print out coverage (the "total:" line is parsed by the Gitlab pipeline)
go tool cover -func=./coverage.out | grep total:

# Convert to Cobertura
#
# NOTE: We are not using the latest `github.com/boumenot/gocover-cobertura`
# because it does not support multiplatform coverage in one profile. See
# https://github.com/boumenot/gocover-cobertura/pull/3#issuecomment-1571586099
go get github.com/t-yuki/gocover-cobertura
go run github.com/t-yuki/gocover-cobertura < ./coverage.out > coverage.xml
|
|
@ -49,6 +49,8 @@ generate_dep_licenses(){
|
|||
sed -i -r '/^github.com\/therecipe\/qt\/internal\/binding\/files\/docs\//d;' "$tmpDepLicenses"
|
||||
sed -i -r 's|^(.*)/([[:alnum:]-]+)/(v[[:digit:]]+)$|* [\2](https://\1/\2/\3)|g' "$tmpDepLicenses"
|
||||
sed -i -r 's|^(.*)/([[:alnum:]-]+)$|* [\2](https://\1/\2)|g' "$tmpDepLicenses"
|
||||
sed -i -r 's|^(.*)/([[:alnum:]-]+).(v[[:digit:]]+)$|* [\2](https://\1/\2.\3)|g' "$tmpDepLicenses"
|
||||
|
||||
|
||||
## add license file to github links, and others
|
||||
sed -i -r '/github.com/s|^(.*(https://[^)]+).*)$|\1 available under [license](\2/blob/master/LICENSE) |g' "$tmpDepLicenses"
|
||||
|
@ -57,8 +59,14 @@ generate_dep_licenses(){
|
|||
sed -i -r '/golang.org\/x/s|^(.*golang.org/x/([^)]+).*)$|\1 available under [license](https://cs.opensource.google/go/x/\2/+/master:LICENSE) |g' "$tmpDepLicenses"
|
||||
sed -i -r '/google.golang.org\/grpc/s|^(.*)$|\1 available under [license](https://github.com/grpc/grpc-go/blob/master/LICENSE) |g' "$tmpDepLicenses"
|
||||
sed -i -r '/google.golang.org\/protobuf/s|^(.*)$|\1 available under [license](https://github.com/protocolbuffers/protobuf/blob/main/LICENSE) |g' "$tmpDepLicenses"
|
||||
sed -i -r '/go.uber.org\/goleak/s|^(.*)$|\1 available under [license](https://pkg.go.dev/go.uber.org/goleak?tab=licenses) |g' "$tmpDepLicenses"
|
||||
sed -i -r '/ariga.io\/atlas/s|^(.*)$|\1 available under [license](https://github.com/ariga/atlas/blob/master/LICENSE) |g' "$tmpDepLicenses"
|
||||
sed -i -r '/entgo.io\/ent/s|^(.*)$|\1 available under [license](https://pkg.go.dev/entgo.io/ent?tab=licenses) |g' "$tmpDepLicenses"
|
||||
sed -i -r '/google.golang.org\/genproto/s|^(.*)$|\1 available under [license](https://pkg.go.dev/google.golang.org/genproto?tab=licenses) |g' "$tmpDepLicenses"
|
||||
sed -i -r '/gopkg.in\/yaml\.v3/s|^(.*)$|\1 available under [license](https://github.com/go-yaml/yaml/blob/v3.0.1/LICENSE) |g' "$tmpDepLicenses"
|
||||
}
|
||||
|
||||
|
||||
check_dependecies(){
|
||||
generate_dep_licenses
|
||||
|
||||
|
|
|
@ -0,0 +1,91 @@
|
|||
// Copyright (c) 2023 Proton AG
|
||||
//
|
||||
// This file is part of Proton Mail Bridge.
|
||||
//
|
||||
// Proton Mail Bridge is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// Proton Mail Bridge is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with Proton Mail Bridge. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/tls"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/emersion/go-sasl"
|
||||
"github.com/emersion/go-smtp"
|
||||
)
|
||||
|
||||
// Command-line flags controlling the SMTP connection, the credentials, and
// the message envelope recipient.
//
// NOTE: the original usage text for -toAddr read "Address toAddr whom toAddr
// send the message" — a find-replace of "to" -> "toAddr" had mangled the
// user-facing help string; restored to readable English.
var (
	serverURL    = flag.String("server", "127.0.0.1:1025", "SMTP server address:port")
	userName     = flag.String("user-name", "user", "SMTP user name")
	userPassword = flag.String("user-pwd", "password", "SMTP user password")
	toAddr       = flag.String("toAddr", "", "Address to whom to send the message")
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
flag.Usage = func() {
|
||||
fmt.Printf("Usage %v [options] file0 ... fileN\n", os.Args[0])
|
||||
fmt.Printf("\nOptions:\n")
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
if len(*toAddr) == 0 {
|
||||
panic(fmt.Errorf("to flag can't be empty"))
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) == 0 {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
|
||||
smtpClient, err := smtp.Dial(*serverURL)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to connect to server: %w", err))
|
||||
}
|
||||
defer func() { _ = smtpClient.Close() }()
|
||||
|
||||
// Upgrade to TLS.
|
||||
if err := smtpClient.StartTLS(&tls.Config{InsecureSkipVerify: true}); err != nil {
|
||||
panic(fmt.Errorf("failed to starttls: %w", err))
|
||||
}
|
||||
|
||||
// Authorize with SASL PLAIN.
|
||||
if err := smtpClient.Auth(sasl.NewPlainClient(
|
||||
*userName,
|
||||
*userName,
|
||||
*userPassword,
|
||||
)); err != nil {
|
||||
panic(fmt.Errorf("failed to login: %w", err))
|
||||
}
|
||||
|
||||
for idx, v := range args {
|
||||
fileData, err := os.ReadFile(v)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to read file:%v - %w", v, err))
|
||||
}
|
||||
|
||||
// Send the message.
|
||||
if err := smtpClient.SendMail(
|
||||
*userName,
|
||||
[]string{*toAddr},
|
||||
bytes.NewReader(fileData),
|
||||
); err != nil {
|
||||
panic(fmt.Errorf("failed to send msg %v: %w", idx, err))
|
||||
}
|
||||
}
|
||||
}
|
Loading…
Reference in New Issue