diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..513c6f5
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,7 @@
+*
+!Makefile
+!go.mod
+!go.sum
+!main.go
+!internal
+!hack
diff --git a/Design.md b/Design.md
new file mode 100644
index 0000000..6e9b4f4
--- /dev/null
+++ b/Design.md
@@ -0,0 +1,80 @@
+
+### Logwatcher Design
+
+Logwatcher uses Linux inotify[1] to add watches to the directories and files generated under "/var/log/pods". Different
+sets of watch flags are used for directories and for files.
+
+Watches for directories:
+ - `IN_CREATE`: watch is triggered when a file or directory is created in this directory.
+ - `IN_DELETE`: watch is triggered when a file or directory is deleted in this directory.
+
+Watches for files:
+ - `IN_MODIFY`: watch is triggered when the file is written.
+ - `IN_CLOSE_WRITE`: watch is triggered when the file is closed for writing.
+ - `IN_ONESHOT`: when the ONESHOT flag is used together with the other flags, the watch is deleted as soon as its
+   condition is fulfilled and the message has been sent. The application needs to set the watch again if needed.
+
+When there is activity on the watched directories and files, the watches are triggered and a message is sent on the
+inotify file descriptor. Since watches on files are not kept permanently but are deleted as soon as they are triggered
+(because of the ONESHOT flag), messages do not accumulate in the inotify queue. Without the ONESHOT flag, with logs
+continuously being written, the queue can fill up very easily and messages start getting dropped.
+
+### Watch Structure
+
+    /var/log/pods/ (IN_CREATE|IN_DELETE)
+    │
+    ├────── openshift-kube-controller-manager_kube-controller-manager-crc-pbwlw-master-0_738a9f84e9aed99070694fd38123a679/ (IN_CREATE|IN_DELETE)
+    │         │
+    │         ├────── cluster-policy-controller/ (IN_CREATE|IN_DELETE)
+    │         │            0.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │         │            4.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │         │            5.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │         │            6.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │         │            7.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │         │            8.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │         │
+    │         └────── kube-controller-manager/ (IN_CREATE|IN_DELETE)
+    │                      0.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │                      10.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │                      11.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │                      12.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │                      13.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │                      9.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+    │
+    └────── openshift-cluster-version_cluster-version-operator-8b9c98bfd-8mj5d_60452d84-5db1-4e5f-815c-245aaa76cbb9/ (IN_CREATE|IN_DELETE)
+              │
+              └────── cluster-version-operator/ (IN_CREATE|IN_DELETE)
+                           0.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           1.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           1.log.20230102-180708 (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           2.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           2.log.20230104-094208.gz (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           2.log.20230104-182347.gz (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           2.log.20230105-030537.gz (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           2.log.20230105-114647 (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           3.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           4.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           4.log.20230111-190053 (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+                           5.log (IN_MODIFY|IN_CLOSE_WRITE|IN_ONESHOT)
+
+### Read Loop
+A goroutine reads the raw events posted to the inotify file descriptor and sends an event to the Event Loop over a
+buffered channel for handling.
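+
+The sketch below shows the shape of this loop. It is a condensed, illustrative version of the `ReadLoop` added in
+`internal/inotify/inotify.go` later in this patch (buffer sizing, logging and shutdown handling trimmed), not an exact copy:
+
+```go
+// readLoop drains raw inotify events from the inotify fd and forwards them to the Event Loop.
+// Assumes: import ("os"; "strings"; "unsafe"; "golang.org/x/sys/unix")
+func readLoop(inotifyFile *os.File, events chan<- NotifyEvent) {
+	var buf [unix.SizeofInotifyEvent * 4096]byte
+	for {
+		n, err := inotifyFile.Read(buf[:])
+		if err != nil || n < unix.SizeofInotifyEvent {
+			return // EOF, read error, or short read: stop reading
+		}
+		var offset uint32
+		for offset <= uint32(n-unix.SizeofInotifyEvent) {
+			// Each record is a fixed-size header followed by an optional NUL-padded name.
+			raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset]))
+			name := string(buf[offset+unix.SizeofInotifyEvent : offset+unix.SizeofInotifyEvent+raw.Len])
+			events <- NotifyEvent{InotifyEvent: *raw, path: strings.TrimRight(name, "\x00")}
+			offset += unix.SizeofInotifyEvent + raw.Len
+		}
+	}
+}
+```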
+
+### Event Loop
+A goroutine reads the buffered channel and processes each NotifyEvent.
+
+
+### References
+ [1] [Filesystem notification, part 2: A deeper investigation of inotify](https://lwn.net/Articles/605128/)
+
diff --git a/Dockerfile b/Dockerfile
index e4221c9..59c789a 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,42 +1,19 @@
-### This is a generated file from Dockerfile.in ###
-#@follow_tag(registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder:rhel_8_golang_1.18)
 FROM registry.access.redhat.com/ubi8/go-toolset AS builder
-ENV BUILD_VERSION=1.1.0
-ENV OS_GIT_MAJOR=1
-ENV OS_GIT_MINOR=1
-ENV OS_GIT_PATCH=0
-ENV SOURCE_GIT_COMMIT=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_COMMIT}
-ENV SOURCE_GIT_URL=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_URL}
-ENV REMOTE_SOURCE=${REMOTE_SOURCE:-.}
-
-
+WORKDIR /go/src/github.com/ViaQ/logwatcher
+COPY . .
 USER 0
-WORKDIR /go/src/github.com/log-file-metric-exporter
-COPY ${REMOTE_SOURCE} .
-
+RUN go mod download
 RUN make build
-#@follow_tag(registry.redhat.io/ubi8:latest)
-FROM registry.access.redhat.com/ubi8
-COPY --from=builder /go/src/github.com/log-file-metric-exporter/bin/log-file-metric-exporter /usr/local/bin/.
-COPY --from=builder /go/src/github.com/log-file-metric-exporter/hack/log-file-metric-exporter.sh /usr/local/bin/.
-
-RUN chmod +x /usr/local/bin/log-file-metric-exporter
-RUN chmod +x /usr/local/bin/log-file-metric-exporter.sh
-
-LABEL \
-    io.k8s.display-name="OpenShift LogFileMetric Exporter" \
-    io.k8s.description="OpenShift LogFileMetric Exporter component of OpenShift Cluster Logging" \
-    License="Apache-2.0" \
-    name="openshift-logging/log-file-metric-exporter-rhel8" \
-    com.redhat.component="log-file-metric-exporter-container" \
-    io.openshift.maintainer.product="OpenShift Container Platform" \
-    io.openshift.maintainer.component="Logging" \
-    io.openshift.build.commit.id=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_COMMIT} \
-    io.openshift.build.source-location=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_URL} \
-    io.openshift.build.commit.url=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_URL}/commit/${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_COMMIT} \
-    version=v1.1.0
-
-CMD ["sh", "-c", "/usr/local/bin/log-file-metric-exporter.sh"]
-
+#FROM registry.access.redhat.com/ubi8
+FROM centos:centos8
+USER 0
+RUN sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*
+RUN sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*
+#RUN echo 'kernel.yama.ptrace_scope = 0' >> /etc/sysctl.conf
+RUN yum -y install strace
+RUN yum -y install trace-cmd
+COPY --from=builder /go/src/github.com/ViaQ/logwatcher/bin/logwatcher /usr/local/bin/
+COPY --from=builder /go/src/github.com/ViaQ/logwatcher/hack/list-watches.sh /usr/local/bin/
+CMD ["sh", "-c", "/usr/local/bin/logwatcher", "-watch_dir=/var/log/pods", "-v=0", "-logtostderr=true"]
diff --git a/Dockerfile.in b/Dockerfile.in
deleted file mode 100644
index be41275..0000000
--- a/Dockerfile.in
+++ /dev/null
@@ -1,43 +0,0 @@
-#@follow_tag(registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder:rhel_8_golang_1.18)
-FROM registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder:v1.18.4-202207291631.el8.g2ee6935 AS builder
-
-ENV BUILD_VERSION=1.1.0
-ENV OS_GIT_MAJOR=1
-ENV OS_GIT_MINOR=1
-ENV OS_GIT_PATCH=0
-ENV SOURCE_GIT_COMMIT=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_COMMIT}
-ENV SOURCE_GIT_URL=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_URL}
-ENV
REMOTE_SOURCE=${REMOTE_SOURCE:-.}
-
-## EXCLUDE BEGIN ##
-ENV REMOTE_SOURCE=${REMOTE_SOURCE}/app
-## EXCLUDE END ##
-
-USER 0
-WORKDIR /go/src/github.com/log-file-metric-exporter
-COPY ${REMOTE_SOURCE} .
-
-RUN make build
-
-#@follow_tag(registry.redhat.io/ubi8:latest)
-FROM registry.redhat.io/ubi8:8.4-209
-COPY --from=builder /go/src/github.com/log-file-metric-exporter/bin/log-file-metric-exporter /usr/local/bin/.
-COPY --from=builder /go/src/github.com/log-file-metric-exporter/hack/log-file-metric-exporter.sh /usr/local/bin/.
-
-RUN chmod +x /usr/local/bin/log-file-metric-exporter
-RUN chmod +x /usr/local/bin/log-file-metric-exporter.sh
-
-LABEL \
-    io.k8s.display-name="OpenShift LogFileMetric Exporter" \
-    io.k8s.description="OpenShift LogFileMetric Exporter component of OpenShift Cluster Logging" \
-    License="Apache-2.0" \
-    name="openshift-logging/log-file-metric-exporter-rhel8" \
-    com.redhat.component="log-file-metric-exporter-container" \
-    io.openshift.maintainer.product="OpenShift Container Platform" \
-    io.openshift.maintainer.component="Logging" \
-    io.openshift.build.commit.id=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_COMMIT} \
-    io.openshift.build.source-location=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_URL} \
-    io.openshift.build.commit.url=${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_URL}/commit/${CI_LOG_FILE_METRIC_EXPORTER_UPSTREAM_COMMIT} \
-    version=v1.1.0
-
-CMD ["sh", "-c", "/usr/local/bin/log-file-metric-exporter.sh"]
diff --git a/ISSUES.md b/ISSUES.md
new file mode 100644
index 0000000..9853741
--- /dev/null
+++ b/ISSUES.md
@@ -0,0 +1,18 @@
+
+
+#### Watch descriptor reused by inotify
+
+eventLoop:
+
+ 1. program adds a watch on a file, gets a watch fd (say `a0`)
+ 2. file gets modified, program gets an event on watch fd `a0`
+ 3. (since `IN_ONESHOT` was used, the watch is auto-deleted by inotify now)
+ 4. program handles the modify (`0x2`) event and does its logic
+ 5. program adds a watch again on the same file, gets a new watch fd (say `a1`) (this step is the same as step 1)
+
+While testing, it was observed that after some iterations of the above loop, the watch fd returned in step 5 is the same
+as the one returned in step 1. Once this happens, no more events are received for file changes; the loop essentially halts.
+
+#### Workaround
+Save the watch descriptor for each file and compare the newly returned watch descriptor with the earlier one; if they are the same, add the watch again until the returned watch descriptor is different.
+https://github.com/vimalk78/logwatcher/blob/main/internal/inotify/watch.go#L49-L64
diff --git a/Makefile b/Makefile
old mode 100755
new mode 100644
index fced61d..78edd6b
--- a/Makefile
+++ b/Makefile
@@ -1,73 +1,12 @@
-export GOROOT=$(shell go env GOROOT)
-export GOFLAGS=
-export GO111MODULE=on
-ARTIFACT_DIR?=./tmp
-CURPATH=$(PWD)
-GOFLAGS?=
-CLO_RELEASE_VERSION?=5.4
-BIN_NAME=log-file-metric-exporter
-IMAGE_REPOSITORY_NAME=quay.io/openshift-logging/origin-${BIN_NAME}:${CLO_RELEASE_VERSION}
-LOCAL_IMAGE_TAG=127.0.0.1:5000/openshift/origin-${BIN_NAME}:${CLO_RELEASE_VERSION}
-#just for testing purpose pushing it to docker.io
-MAIN_PKG=cmd/main.go
-TARGET_DIR=$(CURPATH)/_output
-TARGET=$(CURPATH)/bin/$(BIN_NAME)
-BUILD_GOPATH=$(TARGET_DIR)
+IMAGE=viaq/logwatcher:v0.0.1
+TARGET=bin/logwatcher
+MAIN_PKG=main.go
-#inputs to 'run' which may need to change
-TLS_CERTS_BASEDIR=_output
-NAMESPACE ?= "openshift-logging"
-ES_CERTS_DIR ?= ""
-CACHE_EXPIRY ?= "5s"
-
-PKGS=$(shell go list ./...)
-TEST_OPTIONS?= - - - -all: fmt build image deploy-image -.PHONY: all - -artifactdir: - @mkdir -p $(ARTIFACT_DIR) - - -fmt: - @gofmt -l -w cmd && \ - gofmt -l -w pkg -.PHONY: fmt - -build: fmt - go build $(LDFLAGS) -o $(TARGET) $(MAIN_PKG) .PHONY: build +build: + go build $(LDFLAGS) -o $(TARGET) $(MAIN_PKG) -image: - podman build -f Dockerfile -t $(LOCAL_IMAGE_TAG) . - podman tag ${LOCAL_IMAGE_TAG} ${IMAGE_REPOSITORY_NAME} .PHONY: image - -deploy-image: image - IMAGE_TAG=$(LOCAL_IMAGE_TAG) hack/deploy-image.sh - IMAGE_TAG=$(IMAGE_REPOSITORY_NAME) hack/deploy-image.sh -.PHONY: deploy-image - -clean: - rm -rf $(TARGET_DIR) -.PHONY: clean - -COVERAGE_DIR=$(ARTIFACT_DIR)/coverage -test: artifactdir - @mkdir -p $(COVERAGE_DIR) - @go test -race -coverprofile=$(COVERAGE_DIR)/test-unit.cov ./pkg/... - @go test -v ./cmd - @go tool cover -html=$(COVERAGE_DIR)/test-unit.cov -o $(COVERAGE_DIR)/test-unit-coverage.html - @go tool cover -func=$(COVERAGE_DIR)/test-unit.cov | tail -n 1 -.PHONY: test - -lint: - @hack/run-linter -.PHONY: lint -gen-dockerfiles: - ./hack/generate-dockerfile-from-midstream > Dockerfile -.PHONY: gen-dockerfiles +image: + podman build -f Dockerfile . -t $(IMAGE) diff --git a/README.md b/README.md index c422a3d..76af2c0 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ -# log-file-metric-exporter -Exporter to collect metrics about container logs being produced in a kubernetes environment -It publishes log_logged_bytes_total metric in prometheus. This metric allows one to see total data bytes actually logged vs. what collector (fluentd) is able to collect during runtime. -This implementation is based on Golang and it uses fsnotify package to watch out for new data written to log files residing in the Watcher path. +### LogWatcher +Monitors logging root folder "/var/log/pods" for log activity by containers, and generates metrics about amount of logs produced by each container. +### Design +Please refer to [Design.md](./Design.md) diff --git a/TODO.md b/TODO.md new file mode 100644 index 0000000..5698e25 --- /dev/null +++ b/TODO.md @@ -0,0 +1,5 @@ + + +1. Add metrics +2. Add Unit tests +3. 
Fix the problem identified in ISSUES.md diff --git a/cmd/main.go b/cmd/main.go deleted file mode 100644 index fe66918..0000000 --- a/cmd/main.go +++ /dev/null @@ -1,53 +0,0 @@ -package main - -import ( - "flag" - "net/http" - "os" - - "github.com/ViaQ/logerr/log" - "github.com/log-file-metric-exporter/pkg/logwatch" - "github.com/prometheus/client_golang/prometheus/promhttp" -) - -var ( - logDir = "/var/log/pods" -) - -func main() { - var ( - dir string - addr string - crtFile string - keyFile string - verbosity int - ) - flag.StringVar(&dir, "dir", logDir, "Directory containing log files") - flag.IntVar(&verbosity, "verbosity", 0, "set verbosity level") - flag.StringVar(&addr, "http", ":2112", "HTTP service address where metrics are exposed") - flag.StringVar(&crtFile, "crtFile", "/etc/fluent/metrics/tls.crt", "cert file for log-file-metric-exporter service") - flag.StringVar(&keyFile, "keyFile", "/etc/fluent/metrics/tls.key", "key file for log-file-metric-exporter service") - flag.Parse() - - log.SetLogLevel(verbosity) - log.Info("start log metric exporter", "path", dir) - - w, err := logwatch.New(dir) - if err != nil { - log.Error(err, "watch error", "path", dir) - os.Exit(1) - } - - go func() { - if err := w.Watch(); err != nil { - log.Error(err, "error in watch", "path", dir) - os.Exit(1) - } - }() - - http.Handle("/metrics", promhttp.Handler()) - if err := http.ListenAndServeTLS(addr, crtFile, keyFile, nil); err != nil { - log.Error(err, "error in HTTP listen", "addr", addr) - os.Exit(1) - } -} diff --git a/cmd/main_test.go b/cmd/main_test.go deleted file mode 100644 index 6f20685..0000000 --- a/cmd/main_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package main_test - -import ( - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "syscall" - "testing" - "time" - - "github.com/log-file-metric-exporter/test/scraper" - dto "github.com/prometheus/client_model/go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -const url = "https://localhost:2112/metrics" - -// runMain runs the metric exporter watching dir. -func runMain(t *testing.T, dir string) { - t.Helper() - cmd := exec.Command("go", "run", "main.go", "-dir="+dir, "-crtFile=testdata/server.crt", "-keyFile=testdata/server.key") - cmd.Stdout, cmd.Stderr = os.Stderr, os.Stderr - cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true} // create session so we can kill go run and sub-processes - require.NoError(t, cmd.Start()) - t.Cleanup(func() { - require.NoError(t, syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)) - _ = cmd.Wait() - }) -} - -// Test that scraped metrics have the correct labels. -func TestScrapeMetrics(t *testing.T) { - // create directories for test logs - tmpDir, err := ioutil.TempDir("", t.Name()) - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - runMain(t, tmpDir) - - // Create a log file - path := filepath.Join(tmpDir, "test-qegihyox_functional_19b40c1b-df6d-4e63-b5aa-d6c5ed20ac4e/something/0.log") - require.NoError(t, os.MkdirAll(filepath.Dir(path), 0700)) - s := scraper.New() - findMetric := func() *dto.Metric { - mfs, err := s.Scrape(url) - require.NoError(t, err) - if mf := mfs["log_logged_bytes_total"]; mf != nil { - return scraper.FindMetric(mf, "poduuid", "19b40c1b-df6d-4e63-b5aa-d6c5ed20ac4e") - } - return nil - } - - // Write to log and scrape metric till eventually the exporter has updated the metric. 
- data := []byte("hello\n") - require.Eventually(t, func() bool { - require.NoError(t, ioutil.WriteFile(path, data, 0600)) - if m := findMetric(); m != nil { - assert.Equal(t, float64(len(data)), *m.Counter.Value) - assert.Equal(t, scraper.Labels(m), map[string]string{ - "containername": "something", - "namespace": "test-qegihyox", - "podname": "functional", - "poduuid": "19b40c1b-df6d-4e63-b5aa-d6c5ed20ac4e", - }) - return true - } - return false - }, 10*time.Second, time.Second/10) - - // Write more data, should be detected - f, err := os.OpenFile(path, os.O_APPEND|os.O_WRONLY|os.O_CREATE, 0600) - require.NoError(t, err) - defer func() { _ = f.Close() }() - assert.Eventually(t, func() bool { - _, err = f.WriteString("more data\n") - require.NoError(t, err) - m := findMetric() - return m != nil && *m.Counter.Value > float64(len(data)) - }, 10*time.Second, time.Second/10) - - // Remove the log, make sure the metric is eventually removed. - os.Remove(path) - require.Eventually(t, func() bool { return findMetric() == nil }, 10*time.Second, time.Second/10) -} diff --git a/cmd/testdata/server.crt b/cmd/testdata/server.crt deleted file mode 100644 index fccce65..0000000 --- a/cmd/testdata/server.crt +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDfzCCAmegAwIBAgIUUZIEdmv7QaNkobV6C6+uHWLjGRkwDQYJKoZIhvcNAQEL -BQAwTzELMAkGA1UEBhMCQ0ExCzAJBgNVBAgMAlFDMRAwDgYDVQQHDAdub3doZXJl -MRAwDgYDVQQKDAdub3RoaW5nMQ8wDQYDVQQDDAZub2JvZHkwHhcNMjIwMzMxMjAy -MTU4WhcNMjIwNDMwMjAyMTU4WjBPMQswCQYDVQQGEwJDQTELMAkGA1UECAwCUUMx -EDAOBgNVBAcMB25vd2hlcmUxEDAOBgNVBAoMB25vdGhpbmcxDzANBgNVBAMMBm5v -Ym9keTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAPZ2zFLtln5/n8eq -ymBjZI8XjI09xm4rmoUiOUDBKq/IGIqcw+CdoYg/RtRkc9zRfmK/oiD77GDeTvng -WxY7i5IV/rYlBq6y/VnkYVaAJ7an8rdE7/2P4L9PzHhVZ/KvX7/P+KH2o+CkUeXx -JEk03d/7VHbOKCS+yQSw4Ys1QRETpDTWF2zcARfE/qqkf8SHWS9crtjHrSpN1BTs -l/rR7ok7pl2gy6qsNPYE/C5ZE0vX5/LkR5g6oYR+ePHs3pUxbJSi7H2M/h55wVqz -2Z2/Gs1DGoUgyCXFR0B75BNKemggH2dcP4Hu3f5n2uiDo4wNoAkvzSbdXNAmwOfV -7wztz/MCAwEAAaNTMFEwHQYDVR0OBBYEFDpqD1q1RwfCcuvK4AibRsBh1mK+MB8G -A1UdIwQYMBaAFDpqD1q1RwfCcuvK4AibRsBh1mK+MA8GA1UdEwEB/wQFMAMBAf8w -DQYJKoZIhvcNAQELBQADggEBALyNdXROCRVVNHK16/Tup3mq690FaWhr0DF1qSBO -QVmo9CtGVaMbCxu2cYattSntYlWYBJpfIFCKl/9IGAmsW53bQlBCq8b/3w4hlHez -hNLm07ealFOdw/2uPqlzkJSSiAcUmHxSrKkcaf2pCPvVY4ibIqflHBg5R5Zv8pV8 -FdsSnUwkYIiDU0sQTJRyKWbC0ntuq3xkGPvKoHKAMeB79uqT0KXXOw/lhyXQVqX6 -0BgNX8ES9bfwOvfQCbS20iQXy/9zEJURPbInNZbU2JXJDhb8c+pbFacF9SlYn9TN -3x8jPrMAWYMi8cA9GqSwrBvvgNQFE2M0yGZqXz9uOsYW714= ------END CERTIFICATE----- diff --git a/cmd/testdata/server.key b/cmd/testdata/server.key deleted file mode 100644 index 4402a62..0000000 --- a/cmd/testdata/server.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD2dsxS7ZZ+f5/H -qspgY2SPF4yNPcZuK5qFIjlAwSqvyBiKnMPgnaGIP0bUZHPc0X5iv6Ig++xg3k75 -4FsWO4uSFf62JQausv1Z5GFWgCe2p/K3RO/9j+C/T8x4VWfyr1+/z/ih9qPgpFHl -8SRJNN3f+1R2zigkvskEsOGLNUERE6Q01hds3AEXxP6qpH/Eh1kvXK7Yx60qTdQU -7Jf60e6JO6ZdoMuqrDT2BPwuWRNL1+fy5EeYOqGEfnjx7N6VMWyUoux9jP4eecFa -s9mdvxrNQxqFIMglxUdAe+QTSnpoIB9nXD+B7t3+Z9rog6OMDaAJL80m3VzQJsDn -1e8M7c/zAgMBAAECggEBANEA/VR11mF/qOeRlf0XTHdr6Oz9rgBPScIMu7CGHsTi -Uq15M0KPaiQ4RMBoTzi7Dwp+p+aAvHuJCkAsWhIIfNtte4rK61fKwb4xRbotA4CJ -+/Ieq8XC9TxHV4XUJ0pOHkxy+M2gcmhV8H2meLb79guy/nWhbbl7cMxhk2KRzpPD -Aj6wuSg7eShoh43oVTYMnTb2zGHNhVAgQ3YtfA5A1qqVju7Q59Jxa7Dom+8HqZGU -q4vznO9YD709DbcEnTREuDO7QVEoPbk0TpUGAmQR53yhjCw6dRalMVFnC1KMDyNN -IhrkshTe4eycX9dmFyDl3L91U1AcuSKpJcui76u4V0ECgYEA+8xiPVOkTDm9Unal 
-pWbnBlPR096eUyMNW99sFuNIcTNotfm917xB1ENBJrGYdBSCwRMj4kOnRzBCgkEq -+l3RUvk2WbGj+/w3BhuE8rsp1FG00fze8gZ+pyzVwuvctqbrW3hDExORXZtYcCrx -NY0nIcdDv0/wKeRs7KmnrnJsTOcCgYEA+pOgqawxUzIdaiMxNpKut5cXuB0pInf9 -DbfpS++3TxJ6kI8TdE2FUvE+Vt7xOZOwHKnhTJcRT9bPY321dBOMQ1tJ1klEqRi1 -AXDWIcM0HwXG6zNJT5zs5QfIDAgI+JB4XGvE1jsQoxV/dWjmMo+D7dpc4JlZbENP -Ej16JiavVxUCgYEAnt+zsKxAw1O/L/PqynUmGajzgPI7BYvdVvWe1sYrpDlivUdJ -0IBZLNwOlKKD5eI3Kimwf4heWJ2LqXIC8xwVAnx6HZfnNwxJj5H3jcwoNW4kYEZk -SWNtsH+qlKWLirfBUuKLt9Kl6uEkqQl/TmG0qix3g5OlnlkhnSXixkAuM0sCgYBq -aPzmcQh1Uyfopc9FZJ8ruc9J0aBhUeE1zS8Wn1MSEj/YWdoiHrWKloYpr/1yvQQh -+AaEVvb0F730ezLue0qqY/8EMBMnWCWt2fQlXFcrAXIK+bmdRH/Zdyd97H+axwdG -AEIT0/tptjxtaMGxIP8eqAefHaA6+I6AqXjQ8MavTQKBgAgI4fUC1tE0JZaewJvG -Jq+LU0lGEzDLAdS4JLcqbjP2aiaTQthTzaw7lrM3mC85y8oVKPYRTwsNtcJROB7c -xuJQSdGXvdgTg6qIYnxZSRCP+RDrI6dw1dNnpY6DdeAjn+zGTGiH+q2w1OjMW79H -8/iWd80rVGV2HEIM8yCOZKvC ------END PRIVATE KEY----- diff --git a/go.mod b/go.mod index c90f144..b372335 100644 --- a/go.mod +++ b/go.mod @@ -1,26 +1,17 @@ -module github.com/log-file-metric-exporter +module github.com/ViaQ/logwatcher go 1.18 -require ( - github.com/ViaQ/logerr v1.0.9 - github.com/fsnotify/fsnotify v1.6.0 - github.com/prometheus/client_golang v1.12.1 - github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.33.0 - github.com/stretchr/testify v1.4.0 -) - require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.1.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v0.2.1 // indirect + github.com/golang/glog v1.0.0 // indirect github.com/golang/protobuf v1.5.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/procfs v0.7.3 // indirect - golang.org/x/sys v0.0.0-20220908164124-27713097b956 // indirect - google.golang.org/protobuf v1.26.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect + github.com/prometheus/client_golang v1.14.0 // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.37.0 // indirect + github.com/prometheus/procfs v0.8.0 // indirect + golang.org/x/sys v0.3.0 // indirect + google.golang.org/protobuf v1.28.1 // indirect ) diff --git a/go.sum b/go.sum index ccf0649..2ac43d2 100644 --- a/go.sum +++ b/go.sum @@ -33,8 +33,6 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/ViaQ/logerr v1.0.9 h1:37DLQW3QYFAkD57X+99AClUaM0E8WMgzUplMjdvsJsY= -github.com/ViaQ/logerr v1.0.9/go.mod h1:KZ3ne81U/sJhHt3AjE5AvhoQDY0Rh1O+u4rEHKjG/No= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -54,14 +52,11 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod 
h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= @@ -73,11 +68,11 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9 github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= -github.com/go-logr/logr v0.2.1 h1:fV3MLmabKIZ383XifUjFSwcoGee0v9qgPp8wy5svibE= -github.com/go-logr/logr v0.2.1/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -115,7 +110,6 @@ github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -146,10 +140,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod 
h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -163,31 +155,33 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= +github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.33.0 h1:rHgav/0a6+uYgGdNt3jwz8FNSesO/Hsang3O0T9A5SE= -github.com/prometheus/common v0.33.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRLCZYc7JZTNE= +github.com/prometheus/common v0.37.0 
h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= +github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= +github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -196,7 +190,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -327,8 +320,8 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956 h1:XeJjHH1KiLpKGb6lvMiksZ9l0fVUh+AmGcm0nOMEBOY= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -384,7 +377,6 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors 
v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= @@ -460,12 +452,12 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -473,7 +465,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/hack/build-component-image.sh b/hack/build-component-image.sh deleted file mode 100755 index d9314dc..0000000 --- a/hack/build-component-image.sh +++ /dev/null @@ -1,22 +0,0 @@ -#! /bin/bash - -set -euo pipefail - -dir=$1 -fullimagename=$2 - -tag_prefix="${OS_IMAGE_PREFIX:-"openshift/origin-"}" - - -dockerfile=Dockerfile - -dfpath=${dir}/${dockerfile} - -echo "----------------------------------------------------------------------------------------------------------------" -echo "- -" -echo "Building image $dir - this may take a few minutes until you see any output..." -echo "- -" -echo "----------------------------------------------------------------------------------------------------------------" -buildargs="" - -podman build $buildargs -f $dfpath -t "$fullimagename" $dir diff --git a/hack/deploy-image.sh b/hack/deploy-image.sh deleted file mode 100755 index fa5a6db..0000000 --- a/hack/deploy-image.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -echo "Setting up port-forwarding to remote registry ..." 
-coproc oc -n openshift-image-registry port-forward service/image-registry 5000:5000 -trap "kill -15 $COPROC_PID" EXIT -read PORT_FORWARD_STDOUT <&"${COPROC[0]}" -if [[ "$PORT_FORWARD_STDOUT" =~ ^Forwarding.*5000$ ]] ; then - user=$(oc whoami | sed s/://) - echo "Login to registry..." - podman login --tls-verify=false -u ${user} -p $(oc whoami -t) 127.0.0.1:5000 - - echo "Pushing image ${IMAGE_TAG} ..." - if podman push --tls-verify=false ${IMAGE_TAG} ; then - oc -n openshift get imagestreams | grep log-file-metric-exporter - fi -else - echo "Unexpected message from oc port-forward: $PORT_FORWARD_STDOUT" -fi diff --git a/hack/generate-dockerfile-from-midstream b/hack/generate-dockerfile-from-midstream deleted file mode 100755 index 17c99a4..0000000 --- a/hack/generate-dockerfile-from-midstream +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python3 - -import re,sys,yaml -import os.path - -dockerfileInFile = "Dockerfile.in" -dockerfileOut = "Dockerfile" -metaFile = "origin-meta.yaml" - -if len(sys.argv) > 1: - dockerfileInFile = sys.argv[1] -if len(sys.argv) > 2: - dockerfileOut = sys.argv[2] - -with open(dockerfileInFile, 'r') as f: - dockerfileIn = f.read() - -metaFile = os.path.join(os.path.dirname(dockerfileInFile), metaFile) -with open(metaFile, 'r') as f: - metaYaml = yaml.safe_load(f) - -froms = metaYaml['from'] -if froms and len(froms) > 0: - for base in froms: - dockerfileIn = re.sub("FROM " + base['source'],"FROM " + base['target'],dockerfileIn) - -#Remove aliases if only one is defined otherwise it will fail -aliases = [] -froms = 0 -for l in dockerfileIn.split("\n"): - if l.startswith("FROM"): - froms = froms + 1 - index = l.rfind("AS") - if index > -1: - aliases.append(l[index + 3:]) -if len(aliases) == 1 and froms == 1: - dockerfileIn = re.sub("--from=" + aliases[0],"",dockerfileIn) - -exclude = False -print("### This is a generated file from Dockerfile.in ###") -for l in dockerfileIn.split('\n'): - if l == "## EXCLUDE BEGIN ##": - exclude = True - continue - if l == "## EXCLUDE END ##": - exclude = False - continue - if not exclude: - print(l) diff --git a/hack/json-log-gen.yaml b/hack/json-log-gen.yaml new file mode 100644 index 0000000..d7c2587 --- /dev/null +++ b/hack/json-log-gen.yaml @@ -0,0 +1,58 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: my-project2 +spec: + finalizers: + - kubernetes +--- +apiVersion: v1 +kind: Pod +metadata: + labels: + component: test + name: json-log-generator + namespace: my-project2 +code: &code-str | + import time,json,sys,datetime + def set_vals(): + data["msgcontent"]="My life is my message" + data["facility_key"]="local0" + data["severity_key"]="Informational" + data["mykey0"]="myvalue" + data["mykey1"]="myvalue" + data["mykey2"]="myvalue" + data["mykey3"]="myvalue" + data["mykey4"]="myvalue" + data["mykey5"]="myvalue" + data["mykey6"]="myvalue" + data["mykey7"]="myvalue" + data["mykey8"]="myvalue" + data["mykey9"]="myvalue" + i=0 + while True: + i=i+1 + ts=time.time() + data={ + "timestamp" :datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S'), + "index" :i, + } + set_vals() + print json.dumps(data) + sys.stdout.flush() + // uncomment below two lines for testing a container restart. + // once container exits, it will be restarted, and log file will rotate as 0.log, 1.log, 2.log .... 
+ // if i == 1000: + // sys.exit(-1) + time.sleep(0.1) # sleep for 100 msec + +spec: + containers: + - args: + - python2 + - -c + - *code-str + image: centos:centos7 + imagePullPolicy: IfNotPresent + name: log-generator + resources: {} diff --git a/hack/list-watches.sh b/hack/list-watches.sh new file mode 100755 index 0000000..f08a464 --- /dev/null +++ b/hack/list-watches.sh @@ -0,0 +1,4 @@ +inotify_file=$(find /proc/1/fd -lname anon_inode:inotify) +inotify_fd=$(basename $inotify_file) +while true; do cat /proc/1/fdinfo/$inotify_fd;echo ""; num=$(cat /proc/1/fdinfo/$inotify_fd| grep ^inotify | wc -l);echo "Total watches: $num"; sleep 1; done + diff --git a/hack/log-file-metric-exporter.service b/hack/log-file-metric-exporter.service deleted file mode 100644 index d21dd7b..0000000 --- a/hack/log-file-metric-exporter.service +++ /dev/null @@ -1,14 +0,0 @@ -[Unit] -Description= log-file-metric-exporter service watch out change in logfiles written to disk by conmon processes and compute, publish log_logged_bytes_total metric via go based prometheus client -After=network.target - -[Service] -Type=simple -ExecStart=/bin/bash /usr/local/bin/log-file-metric-exporter.sh -Restart=on-failure -StartLimitInterval=90 -StartLimitBurst=3 -PIDFile=/var/run/logfilemetricexporter.pid - -[Install] -WantedBy=multi-user.target diff --git a/hack/log-file-metric-exporter.sh b/hack/log-file-metric-exporter.sh deleted file mode 100755 index 4429f69..0000000 --- a/hack/log-file-metric-exporter.sh +++ /dev/null @@ -1,3 +0,0 @@ -#! /bin/bash - -/usr/local/bin/log-file-metric-exporter -verbosity=2 -dir=/var/log/containers -http=:2112 diff --git a/hack/log-generator.yaml b/hack/log-generator.yaml new file mode 100644 index 0000000..afff0e4 --- /dev/null +++ b/hack/log-generator.yaml @@ -0,0 +1,23 @@ +namespace: &ns my-project2 +apiVersion: v1 +kind: Namespace +metadata: + name: *ns + labels: + key1: "value1" + key2: "value2" +spec: + finalizers: + - kubernetes +--- +namespace: &ns my-project2 +apiVersion: v1 +kind: Pod +metadata: + name: log-generator + namespace: *ns +spec: + containers: + - name: log-generator + image: docker.io/library/busybox:1.31.1 + args: ["sh", "-c", "i=0; while true; do echo $i: Test message; echo $i: Info Test message info; echo $i: Error Test message error; echo $i: Notice Test message notice; i=$((i+1)) ; sleep 2; done"] diff --git a/hack/logwatcher.yaml b/hack/logwatcher.yaml new file mode 100644 index 0000000..7553433 --- /dev/null +++ b/hack/logwatcher.yaml @@ -0,0 +1,54 @@ +namespace: &ns my-project +apiVersion: v1 +kind: Namespace +metadata: + name: *ns + labels: + key1: "value1" + key2: "value2" +spec: + finalizers: + - kubernetes +--- +namespace: &ns my-project +apiVersion: v1 +kind: Pod +metadata: + name: logwatcher + namespace: *ns +spec: + volumes: + - hostPath: + path: /var/log/pods + type: "" + name: varlogpods + containers: + - name: logwatcher + image: image-registry.openshift-image-registry.svc:5000/openshift/logwatcher:v0.0.1 + imagePullPolicy: Always + # args: ["sh", "-c", "strace -e inotify_add_watch,inotify_rm_watch -f /usr/local/bin/logwatcher -watch_dir=/var/log/pods -v=5 -logtostderr=true"] + args: ["sh", "-c", "/usr/local/bin/logwatcher -watch_dir=/var/log/pods -v=3 -logtostderr=true"] + securityContext: + allowPrivilegeEscalation: true + capabilities: + add: ["SYS_PTRACE", "SYS_ADMIN"] + # drop: + # - CHOWN + # - DAC_OVERRIDE + # - FOWNER + # - FSETID + # - KILL + # - NET_BIND_SERVICE + # - SETGID + # - SETPCAP + # - SETUID + readOnlyRootFilesystem: true + 
seLinuxOptions: + type: spc_t + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /var/log/pods + name: varlogpods + readOnly: true + diff --git a/hack/run-linter b/hack/run-linter deleted file mode 100755 index 3a28fe4..0000000 --- a/hack/run-linter +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/sh - -set -euo pipefail - -# check dockerfile changes -change=$(./hack/generate-dockerfile-from-midstream | md5sum | cut -d ' ' -f1) -if [ "$change" != "$(md5sum Dockerfile | cut -d ' ' -f1)" ] ; then - echo "A change was found in CI file Dockerfile that was not sourced from the midstream file Dockerfile.in (or vice versa)." - echo "Please reset the CI file (e.g. Dockerfile), update Dockerfile.in, run make gen-dockerfiles and commit the results" - exit 1 -fi diff --git a/internal/inotify/event.go b/internal/inotify/event.go new file mode 100644 index 0000000..27c8e85 --- /dev/null +++ b/internal/inotify/event.go @@ -0,0 +1,36 @@ +package inotify + +import "golang.org/x/sys/unix" + +type NotifyEvent struct { + unix.InotifyEvent + path string +} + +func (ne NotifyEvent) IsCreate() bool { + return ne.Mask&unix.IN_CREATE == unix.IN_CREATE +} + +func (ne NotifyEvent) IsDelete() bool { + return ne.Mask&unix.IN_DELETE == unix.IN_DELETE +} + +func (ne NotifyEvent) IsModify() bool { + return ne.Mask&unix.IN_MODIFY == unix.IN_MODIFY +} + +func (ne NotifyEvent) IsCloseWrite() bool { + return ne.Mask&unix.IN_CLOSE_WRITE == unix.IN_CLOSE_WRITE +} + +func (ne NotifyEvent) IsIgnored() bool { + return ne.Mask&unix.IN_IGNORED == unix.IN_IGNORED +} + +func (ne NotifyEvent) IsOverFlowErr() bool { + return ne.Mask&unix.IN_Q_OVERFLOW == unix.IN_Q_OVERFLOW +} + +func (ne NotifyEvent) IsDir() bool { + return ne.Mask&unix.IN_ISDIR == unix.IN_ISDIR // Subject of this event is a directory. +} diff --git a/internal/inotify/inotify.go b/internal/inotify/inotify.go new file mode 100644 index 0000000..20877c8 --- /dev/null +++ b/internal/inotify/inotify.go @@ -0,0 +1,354 @@ +package inotify + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + "os" + "path/filepath" + "strings" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/golang/glog" + "golang.org/x/sys/unix" +) + +const ( + FlagsWatchDir = unix.IN_CREATE | // File/directory created in watched directory + unix.IN_DELETE // File/directory deleted from watched directory. + //unix.IN_DELETE_SELF // Watched file/directory was itself deleted. + + FlagsWatchFile = unix.IN_MODIFY | // File was modified (e.g., write(2), truncate(2)). + unix.IN_CLOSE_WRITE | // File opened for writing was closed. + unix.IN_ONESHOT // Monitor the filesystem object corresponding to pathname for one event, then remove from watch list. 
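+	// Note: IN_ONESHOT is what keeps the inotify event queue from filling up: the watch on a
+	// file is removed automatically after each event and is re-added from the Event Loop,
+	// as described in Design.md (see also ISSUES.md, "Watch descriptor reused by inotify").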
+ + EventChanSize = 4096 + + RootDir = "/var/log/pods" + SelfDir = "logwatcher" +) + +var ( + sizemtx = sync.Mutex{} + Sizes = map[string]int64{} + HandledEvents uint64 = 0 + SentEvents uint64 = 0 +) + +type Notify struct { + rootDir string + namespaces map[string]struct{} + fd int + inotifyFile *os.File // used for read()ing events + watches map[string]int + paths map[int]string + mtx sync.RWMutex + events chan NotifyEvent +} + +func New(root string) (*Notify, error) { + fi, err := os.Stat(root) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, errors.New("input must be a directory") + } + fd, err := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) + if err != nil { + return nil, err + } + n := &Notify{ + rootDir: root, + fd: fd, + inotifyFile: os.NewFile(uintptr(fd), ""), + mtx: sync.RWMutex{}, + events: make(chan NotifyEvent, EventChanSize), + paths: map[int]string{}, + watches: map[string]int{}, + namespaces: map[string]struct{}{}, + } + return n, n.WatchDir(root) +} + +func (n *Notify) Start() { + n.WatchExistingLogs() + glog.V(0).Infoln("started....") + glog.Flush() + // Suggest to keep num of loops to 1, else events may be handled out of order + MaxEventLoops := 1 + wg := sync.WaitGroup{} + wg.Add(1) + go n.ReadLoop(&wg) + // TODO: remove the looping, need only one EventLoop goroutine + for i := 0; i < MaxEventLoops; i++ { + wg.Add(1) + go n.EventLoop(&wg, i) + } + wg.Wait() +} + +// ReadLoop reads the inotify fd and generates events +func (n *Notify) ReadLoop(wg *sync.WaitGroup) { + var ( + buf [unix.SizeofInotifyEvent * EventChanSize]byte // Buffer for a maximum of 4096 raw events + ) + defer func() { + close(n.events) + n.mtx.Lock() + for fd := range n.paths { + unix.InotifyRmWatch(n.fd, uint32(fd)) + } + n.mtx.Unlock() + n.inotifyFile.Close() + }() + + for { + readbytes, err := n.inotifyFile.Read(buf[:]) + if err != nil { + if errors.Unwrap(err) == io.EOF { + glog.Errorf("Received EOF on inotify file descriptor") + } + glog.Errorf("Error in ReadLoop. breaking the loop. err: %v", err) + break + } + if readbytes <= 0 { + glog.Errorf("readbytes <= 0. breaking the loop. readbytes: %d", readbytes) + break + } + events := 0 + consumed := 0 + var offset uint32 = 0 + for offset <= uint32(readbytes-unix.SizeofInotifyEvent) { + raw := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) + consumed += unix.SizeofInotifyEvent + path := string(buf[offset+unix.SizeofInotifyEvent : offset+unix.SizeofInotifyEvent+raw.Len]) + consumed += int(raw.Len) + offset += unix.SizeofInotifyEvent + raw.Len + /* + if raw.Mask&unix.IN_IGNORED == unix.IN_IGNORED { + continue + } + */ + e := NotifyEvent{ + InotifyEvent: *raw, + path: strings.TrimRight(path, "\x00"), + } + n.events <- e + events += 1 + atomic.AddUint64(&SentEvents, 1) + } + if readbytes-consumed != 0 { + glog.V(0).Infof("Read %d bytes, %d events, consumed %d bytes, remaining %d bytes", readbytes, events, consumed, (readbytes - consumed)) + } + } + wg.Done() + glog.V(0).Infoln("exiting ReadLoop") +} + +func (n *Notify) EventLoop(wg *sync.WaitGroup, idx int) { + var tickerCh <-chan time.Time + // start ticker in first goroutine only + if idx == 0 { + tickerCh = time.NewTicker(time.Minute * 1).C + } + for { + if glog.V(9) { + glog.Info("--------- going to select ---------") + } + select { + case e := <-n.events: + //handled := false + atomic.AddUint64(&HandledEvents, 1) + if e.IsOverFlowErr() { + glog.Exit("Overflow occured. 
Exiting program.") + } + n.mtx.RLock() + watchedPath, ok := n.paths[int(e.Wd)] + n.mtx.RUnlock() + if !ok { + glog.Errorf("A watch event received for an unknown watched path. Fd: %d, Event Mask: %x", e.Wd, e.Mask) + continue + } + + if glog.V(5) && e.Mask != 0x8000 { + glog.Infof("event for wd: %x, watchedPath: %q, for path: %q, mask is: %x", e.Wd, watchedPath, e.path, e.Mask) + } + + switch { + case e.IsIgnored(): + if glog.V(9) { + glog.Infof("Received an IN_IGNORED event. wd: %x, watchedPath: %q, path: %q, Mask: %x", e.Wd, watchedPath, e.path, e.Mask) + } + case e.IsCreate(): + if e.path != "" { + if e.IsDir() { + // a directory got created in a directory + newdir := filepath.Join(watchedPath, e.path) + if watchedPath == n.rootDir { + // a new namespace_pod directory got created, add a watch for this directory + glog.V(3).Infof("A new namespace_pod got created. namespace_pod: %q", newdir) + } else { + // a new container directory got created, add a watch for this directory + glog.V(3).Infof("A new container got created. container: %q", newdir) + } + must(n.WatchDir(newdir)) + } else { + // a logfile got created + logfile := filepath.Join(watchedPath, e.path) + // ignore files which are not log files + if strings.HasSuffix(logfile, ".log") { + glog.V(3).Infof("a new log file got created: %q\n", logfile) + must(n.WatchLogFile(logfile)) + err := UpdateFileSize(logfile) + if err != nil { + glog.Errorf("could not stat file: %q", logfile) + } + } else { + if glog.V(7) { + glog.Infof("A file was created which is not a log file. path: %q", logfile) + } + } + } + } else { + // there should'nt be anything here + glog.Errorf("unrecognized IN_CREATE event. wd: %x, watchedPath: %q, path: %q, Mask: %x", e.Wd, watchedPath, e.path, e.Mask) + } + case e.IsDelete(): + if e.path != "" { + if e.IsDir() { + // a directory got created in a directory + removeddir := filepath.Join(watchedPath, e.path) + if watchedPath == n.rootDir { + // a namespace_pod directory got deleted, remove watch for this directory + glog.V(3).Infof("A namespace_pod got deleted. namespace_pod: %q", removeddir) + } else { + // a container directory got deleted, remove watch for this directory + glog.V(3).Infof("A container got deleted. container: %q", removeddir) + } + must(n.RemoveWatchForPath(removeddir)) + } else { + // a file got deleted in a directory + logfile := filepath.Join(watchedPath, e.path) + glog.V(3).Infof("A log file got deleted %q", logfile) + // don't need to remove watch for the file because files are watched using IN_ONESHOT, and watch would have got removed when file was closed for writing. + must(n.RemoveWatchForPath(logfile)) + } + } else { + // there should'nt be anything here because delete notification should come on parent directory + glog.Errorf("unrecognized IN_DELETE event. wd: %x, watchedPath: %q, path: %q, Mask: %x", e.Wd, watchedPath, e.path, e.Mask) + } + case e.IsModify(): + if e.path != "" { + // this should not occur as no IN_MODIFY watch is placed for any directory + glog.Errorf("unrecognized IN_MODIFY event. 
wd: %x, watchedPath: %q, path: %q, Mask: %x", e.Wd, watchedPath, e.path, e.Mask) + } else { + // a log file got written + if glog.V(9) { + glog.Infof("logfile %q got written", watchedPath) + } + if err := UpdateFileSize(watchedPath); err != nil { + glog.Errorf("Error in doing stat for file: %q, err: %v", watchedPath, err) + } + // add a new watch for the file + must(n.WatchLogFile(watchedPath)) + } + case e.IsCloseWrite(): + if e.path != "" { + // this should not occur as no IN_CLOSE_WRITE watch is placed for any directory + glog.Errorf("unrecognized IN_CLOSE_WRITE event. wd: %x, watchedPath: %q, path: %q, Mask: %x", e.Wd, watchedPath, e.path, e.Mask) + } else { + // a log file opened for writing got closed + glog.V(3).Infof("logfile %q got closed for writing", watchedPath) + // update file size last time + if err := UpdateFileSize(watchedPath); err != nil { + glog.Errorf("Error in doing stat for file: %q, err: %v", watchedPath, err) + } + // add a new watch for the file because we want an event when it is written to again. + // If we avoid adding a watch here, we would need to add a IN_OPEN for its parent directory which leads to a large number of unwanted events + must(n.WatchLogFile(watchedPath)) + } + default: + glog.Errorf("unhandled event. wd: %x, watchedPath: %q, path: %q, Mask: %x", e.Wd, watchedPath, e.path, e.Mask) + } + + case <-tickerCh: + /**/ + sizemtx.Lock() + glog.V(0).Infof("sizes(%d): %s\nEventsSent: %d, EventsHandled: %d\n", len(Sizes), func(m map[string]int64) string { + p, err := json.MarshalIndent(m, "", " ") + if err != nil { + return fmt.Sprintf("%v", err) + } + return string(p) + }(Sizes), SentEvents, HandledEvents) + sizemtx.Unlock() + /**/ + /**/ + n.mtx.RLock() + wl := n.WatchList() + n.mtx.RUnlock() + + l := strings.Join(wl, ",\n") + glog.V(0).Infof("Watching paths: \n%s\nTotal watches: %d\n", l, len(wl)) + /**/ + } + } + wg.Done() + glog.V(3).Info("exiting Handleloop") +} + +func (n *Notify) WatchExistingLogs() { + filepath.WalkDir(n.rootDir, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if path == n.rootDir { + return nil + } + // do not watch self dir else results in a positive feedback loop + if d.Name() == SelfDir && d.IsDir() { + glog.V(0).Infof("skipping reading self dir: %s", SelfDir) + return filepath.SkipDir + } + glog.V(3).Infof("checking dir: %s", path) + + if d.IsDir() { + glog.V(0).Infof("watching directory %q", path) + return n.WatchDir(path) + } else { + err2 := n.WatchLogFile(path) + if err2 != nil { + return err2 + } + err2 = UpdateFileSize(path) + // if err2 != nil { + // n.RemoveWatch(0, path) + // } + return err2 + } + return nil + }) +} + +func UpdateFileSize(path string) error { + s, err := os.Stat(path) + if err != nil { + glog.Error("could not stat file: ", path, err) + return err + } + sizemtx.Lock() + Sizes[path] = s.Size() + sizemtx.Unlock() + return nil +} +func must(err error) { + if err != nil { + glog.Exit("Exiting with error: ", err) + } +} diff --git a/internal/inotify/watch.go b/internal/inotify/watch.go new file mode 100644 index 0000000..40f4c41 --- /dev/null +++ b/internal/inotify/watch.go @@ -0,0 +1,110 @@ +package inotify + +import ( + "errors" + "fmt" + "path/filepath" + "runtime/debug" + + "github.com/golang/glog" + "golang.org/x/sys/unix" +) + +func (n *Notify) WatchList() []string { + n.mtx.RLock() + defer n.mtx.RUnlock() + + entries := make([]string, 0, len(n.watches)) + for pathname, fd := range n.watches { + entries = append(entries, fmt.Sprintf("%6x, %q", fd, pathname)) + 
} + + return entries +} + +func (n *Notify) WatchDir(dir string) error { + n.mtx.Lock() + defer n.mtx.Unlock() + return n.watchPathWith(dir, FlagsWatchDir) +} + +func (n *Notify) WatchLogFile(path string) error { + n.mtx.Lock() + defer n.mtx.Unlock() + return n.watchPathWith(path, FlagsWatchFile) +} + +func (n *Notify) watchPathWith(path string, flags uint32) error { + // n.mtx is already held + oldfd := n.watches[path] + path = filepath.Clean(path) + wfd, err := unix.InotifyAddWatch(n.fd, path, flags) + if wfd == -1 { + glog.Errorf("error in watch for path: %q, err: %v\n", path, err) + debug.PrintStack() + return err + } + + count := 0 + for wfd == oldfd { + //glog.Errorf("count: %3d inotify returned the same fd as before. So adding watch again. oldfd: %x, newfd: %x", count, oldfd, wfd) + wfd, err = unix.InotifyAddWatch(n.fd, path, flags) + if wfd == -1 { + glog.Errorf("error in watch for path: %q, err: %v\n", path, err) + debug.PrintStack() + return err + } + count += 1 + if count == 1000 { + return errors.New("----------- inotify has gone mad -------------------") + } + } + if count != 0 && glog.V(3) { + glog.Infof("After %3d reties: added watch for path: %q, wd: %x", count, path, wfd) + } + n.watches[path] = wfd + n.paths[wfd] = path + return nil +} + +func (n *Notify) RemoveWatch(wfd int, path string) error { + n.mtx.Lock() + defer n.mtx.Unlock() + return n.removeWatch(wfd, path) +} + +func (n *Notify) RemoveWatchForPath(path string) error { + n.mtx.Lock() + defer n.mtx.Unlock() + wd, ok := n.watches[path] + if !ok { + glog.Errorf("Could not find watch descriptor for path %q", path) + return nil + } + return n.removeWatch(wd, path) +} + +func (n *Notify) removeWatch(wfd int, path string) error { + /* + ret, err := unix.InotifyRmWatch(n.fd, uint32(wfd)) + if ret == -1 { + glog.V(0).Infof("error in watch for path: %q, err: %v\n", path, err) + debug.PrintStack() + return err + } + */ + if glog.V(5) { + glog.Infof("removed watch for path: %q\n", path) + } + delete(n.watches, path) + delete(n.paths, wfd) + /* + entries := make([]string, 0, len(n.watches)) + for pathname := range n.watches { + entries = append(entries, pathname) + } + l := strings.Join(entries, ",\n") + glog.V(3).Infof("Watching \n%s\n", l) + */ + return nil +} diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go new file mode 100644 index 0000000..b900023 --- /dev/null +++ b/internal/metrics/metrics.go @@ -0,0 +1,97 @@ +package metrics + +import ( + "errors" + "regexp" + "strconv" + + "github.com/prometheus/client_golang/prometheus" +) + +type Metrics struct { + totalLoggedBytes *prometheus.CounterVec + newMetric *prometheus.GaugeVec +} + +var logFileRegex = regexp.MustCompile(`/var/log/pods/(?P[a-z0-9-]+)_(?P[a-z0-9-]+)_(?P[a-z0-9-]{32})/(?P[a-z0-9-]+)/(?P[0-9]*).log(\.{0,1}(?P[\d]{8}-[\d]{6})){0,1}(\.gz){0,1}`) + +func New() *Metrics { + return &Metrics{ + // =================== + // Proposed New Metric + // =================== + // This is a Gauge because we don't get the incremental value in log size, we get the actual value. + // To make it a Counter we will need to save current size in memory, and update the counter using the diff. + // We could avoid this state in memory by having the actual size of the file as a metric labelled as below. + // And use a separate query to get the total number of bytes written by the container. + // + // Example: + // A container got restarted twice, and in third iteration the log file got rotated thrice. 
The metrics will be as follows: + // + // {"namespace": "N", "podname": "P", "poduuid":"U", "containername":"C", "restartCount":0,"timestamp":""} 2770 + // {"namespace": "N", "podname": "P", "poduuid":"U", "containername":"C", "restartCount":1,"timestamp":""} 8768 + // {"namespace": "N", "podname": "P", "poduuid":"U", "containername":"C", "restartCount":2,"timestamp":"20230105-114647"} 98098098 + // {"namespace": "N", "podname": "P", "poduuid":"U", "containername":"C", "restartCount":2,"timestamp":"20230105-114647"} 6876876 + // {"namespace": "N", "podname": "P", "poduuid":"U", "containername":"C", "restartCount":2,"timestamp":"20230105-114647"} 9879987 + // {"namespace": "N", "podname": "P", "poduuid":"U", "containername":"C", "restartCount":2,"timestamp":""} 98769 + // + // The total size of logged bytes in current generation(restartCount) would be sum of last 4 metrics + // + // When log gets written, we know above values by parsing the filename. The metrics which will be updated for timestamp="". When + // logfile gets rotated, we receive a notification from inotify (not tested or coded yet). on this notification, create a new metric + // with timestamp from filename, and value as the current value of metric. Restart the same metric from 0. + newMetric: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "log_logged_bytes_total", + Help: "Total number of bytes written to a single log file path, accounting for rotations for a ", + }, []string{"namespace", "podname", "poduuid", "containername", "restartCount", "timestamp"}), + + // Old (existing) metric + totalLoggedBytes: prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: "log_logged_bytes_total", + Help: "Total number of bytes written to a single log file path, accounting for rotations", + }, []string{"namespace", "podname", "poduuid", "containername"}), + } +} + +type ParsedLogFile struct { + Namespace string + Pod string + UUID string + Container string + RestartCount int + Timespamp string + IsArchived bool +} + +func (m *Metrics) parse(logFilePath string) (ParsedLogFile, error) { + matches := logFileRegex.FindStringSubmatch(logFilePath) + if len(matches) != 9 { + return ParsedLogFile{}, errors.New("failed to parse log file path") + } + restartCount, err := strconv.Atoi(matches[5]) + if err != nil { + return ParsedLogFile{}, errors.New("failed to parse log file path") + } + return ParsedLogFile{ + Namespace: matches[1], + Pod: matches[2], + UUID: matches[3], + Container: matches[4], + RestartCount: restartCount, + Timespamp: matches[7], // matches[6] corresponds to . 
+ IsArchived: (matches[8] == ".gz"), + }, nil +} + +func (m *Metrics) UpdateMetric(filepath string) error { + var ( + p ParsedLogFile + err error + ) + if p, err = m.parse(filepath); err != nil { + return err + } + prometheus.MustNewConstMetric(nil, prometheus.GaugeValue, float64(12), "") + _ = p + return nil +} diff --git a/internal/metrics/metrics_test.go b/internal/metrics/metrics_test.go new file mode 100644 index 0000000..5bd7886 --- /dev/null +++ b/internal/metrics/metrics_test.go @@ -0,0 +1,78 @@ +package metrics + +import ( + "strconv" + "testing" +) + +func TestRegex(t *testing.T) { + + type test struct { + ParsedLogFile + path string + } + + tests := []test{ + { + path: "/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc-pbwlw-master-0_738a9f84e9aed99070694fd38123a679/cluster-version-operator/2.log", + ParsedLogFile: ParsedLogFile{ + Namespace: "openshift-kube-controller-manager", + Pod: "kube-controller-manager-crc-pbwlw-master-0", + UUID: "738a9f84e9aed99070694fd38123a679", + Container: "cluster-version-operator", + RestartCount: 2, + }, + }, + { + path: "/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc-pbwlw-master-0_738a9f84e9aed99070694fd38123a679/cluster-version-operator/0.log.20230102-180708", + ParsedLogFile: ParsedLogFile{ + Namespace: "openshift-kube-controller-manager", + Pod: "kube-controller-manager-crc-pbwlw-master-0", + UUID: "738a9f84e9aed99070694fd38123a679", + Container: "cluster-version-operator", + RestartCount: 0, + Timespamp: "20230102-180708", + }, + }, + { + path: "/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc-pbwlw-master-0_738a9f84e9aed99070694fd38123a679/cluster-version-operator/2.log.20230105-030511.gz", + ParsedLogFile: ParsedLogFile{ + Namespace: "openshift-kube-controller-manager", + Pod: "kube-controller-manager-crc-pbwlw-master-0", + UUID: "738a9f84e9aed99070694fd38123a679", + Container: "cluster-version-operator", + RestartCount: 2, + Timespamp: "20230105-030511", + IsArchived: true, + }, + }, + } + for _, tc := range tests { + matches := logFileRegex.FindStringSubmatch(tc.path) + if len(matches) != 9 { + t.Errorf("regex matches mismatched. 
want: 9, have: %d", len(matches)) + } + if tc.Namespace != matches[1] { + t.Errorf("namespace want: %s, have: %s", tc.Namespace, matches[1]) + } + if tc.Pod != matches[2] { + t.Errorf("pod want: %s, have: %s", tc.Pod, matches[2]) + } + if tc.UUID != matches[3] { + t.Errorf("uuid want: %s, have: %s", tc.UUID, matches[3]) + } + if tc.Container != matches[4] { + t.Errorf("container want: %s, have: %s", tc.Container, matches[4]) + } + restartCount, _ := strconv.Atoi(matches[5]) + if tc.RestartCount != restartCount { + t.Errorf("restartCount want: %d, have: %d", tc.RestartCount, restartCount) + } + if tc.Timespamp != matches[7] { + t.Errorf("timespamp want: %s, have: %s", tc.Timespamp, matches[7]) + } + if tc.IsArchived != (matches[8] == ".gz") { + t.Errorf("zip file want: %v, have: %v", tc.IsArchived, (matches[8] == ".gz")) + } + } +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..d167e95 --- /dev/null +++ b/main.go @@ -0,0 +1,44 @@ +package main + +import ( + "flag" + "os" + "path/filepath" + + "github.com/ViaQ/logwatcher/internal/inotify" + "github.com/golang/glog" +) + +var ( + watchDirFlag = flag.String("watch_dir", func() string { + wd, _ := os.Getwd() + return wd + }(), "directory to watch for logs") +) + +func main() { + flag.Parse() + watchDir, err := filepath.Abs(*watchDirFlag) + if err != nil { + glog.Exit("error in arguments.", os.Args) + } + d, err := os.Stat(watchDir) + if err != nil { + glog.Exitf("Error occured in inputs. Error: %v", err) + return + } + if !d.IsDir() { + glog.Exitf("watch_dir must be a directory.") + return + } + glog.V(0).Infof("Watching directory %s", watchDir) + n, err := inotify.New(watchDir) + if err != nil { + glog.Exit("error in starting watcher: ", err) + os.Exit(-1) + } + + n.Start() + + glog.Info("Bye..") +} diff --git a/origin-meta.yaml b/origin-meta.yaml deleted file mode 100644 index 8555435..0000000 --- a/origin-meta.yaml +++ /dev/null @@ -1,5 +0,0 @@ -from: -- source: registry-proxy.engineering.redhat.com/rh-osbs/openshift-golang-builder\:v(?:[\.0-9\-]*).* - target: registry.access.redhat.com/ubi8/go-toolset AS builder -- source: registry.redhat.io/ubi8:8.(\d)-([\.0-9])* - target: registry.access.redhat.com/ubi8 diff --git a/pkg/logwatch/log_labels_test.go b/pkg/logwatch/log_labels_test.go deleted file mode 100644 index d5092d8..0000000 --- a/pkg/logwatch/log_labels_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package logwatch - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestParseLogLabels(t *testing.T) { - path := "/var/log/pods/test-qegihyox_functional_19b40c1b-df6d-4e63-b5aa-d6c5ed20ac4e/something/0.log" - var l LogLabels - assert.True(t, l.Parse(path)) - want := LogLabels{ - Namespace: "test-qegihyox", - Name: "functional", - UUID: "19b40c1b-df6d-4e63-b5aa-d6c5ed20ac4e", - Container: "something", - } - assert.Equal(t, want, l) -} diff --git a/pkg/logwatch/watcher.go b/pkg/logwatch/watcher.go deleted file mode 100644 index 2216406..0000000 --- a/pkg/logwatch/watcher.go +++ /dev/null @@ -1,168 +0,0 @@ -// Package logwatch watches Pod log files and updates metrics. -package logwatch - -import ( - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "sync" - - "github.com/ViaQ/logerr/log" - "github.com/fsnotify/fsnotify" - "github.com/log-file-metric-exporter/pkg/symnotify" - "github.com/prometheus/client_golang/prometheus" -) - -var logFile = regexp.MustCompile(`/([a-z0-9-]+)_([a-z0-9-]+)_([a-f0-9-]+)/([a-z0-9-]+)/.*\.log`) - -// LogLabels are the labels for a Pod log file. 
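// Annotation (illustrative sketch, not part of this patch): the logFileRegex
// in internal/metrics/metrics.go above appears to have lost the names of its
// capture groups in this rendering; `(?P[a-z0-9-]+)` is not valid Go regexp
// syntax. A plausible reconstruction, with group names assumed from the
// Prometheus labels used in the same file:
var logFileRegexSketch = regexp.MustCompile(
	`/var/log/pods/(?P<namespace>[a-z0-9-]+)_(?P<podname>[a-z0-9-]+)_(?P<poduuid>[a-z0-9-]{32})/` +
		`(?P<containername>[a-z0-9-]+)/(?P<restartcount>[0-9]*).log` +
		`(\.{0,1}(?P<timestamp>[\d]{8}-[\d]{6})){0,1}(\.gz){0,1}`)

// Submatch numbering is unchanged (indices 1-8, len(matches) == 9), so
// parse() and TestRegex keep working as written; once the names are restored
// they could also be resolved with logFileRegexSketch.SubexpIndex("timestamp")
// instead of hard-coded indices.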
-// -// NOTE: The log Path is not a label because it includes a variable "n.log" part that changes -// over the life of the same container. -type LogLabels struct { - Namespace, Name, UUID, Container string -} - -func (l *LogLabels) Parse(path string) (ok bool) { - match := logFile.FindStringSubmatch(path) - if match != nil { - l.Namespace, l.Name, l.UUID, l.Container = match[1], match[2], match[3], match[4] - return true - } - return false -} - -type Watcher struct { - watcher *symnotify.Watcher - metrics *prometheus.CounterVec - sizes map[LogLabels]float64 - mutex sync.RWMutex -} - -func New(dir string) (*Watcher, error) { - log.V(3).Info("Initializing a new watcher...") - //Get new watcher - watcher, err := symnotify.NewWatcher() - if err != nil { - return nil, fmt.Errorf("error creating watcher: %w", err) - } - w := &Watcher{ - watcher: watcher, - metrics: prometheus.NewCounterVec(prometheus.CounterOpts{ - Name: "log_logged_bytes_total", - Help: "Total number of bytes written to a single log file path, accounting for rotations", - }, []string{"namespace", "podname", "poduuid", "containername"}), - sizes: make(map[LogLabels]float64), - mutex: sync.RWMutex{}, - } - - log.V(3).Info("Registering counter", "metrics", w.metrics) - if err := prometheus.Register(w.metrics); err != nil { - return nil, fmt.Errorf("error registering metrics: %w", err) - } - log.V(3).Info("Walking watch dir", "dir", dir) - err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { return w.Update(path) }) - if err != nil { - return nil, err - } - err = w.watcher.Add(dir) - if err != nil { - return nil, fmt.Errorf("error watching directory %v: %w", dir, err) - } - return w, nil -} - -func (w *Watcher) Close() { - w.watcher.Close() - prometheus.Unregister(w.metrics) -} - -func (w *Watcher) Forget(path string) { - log.V(3).Info("Watcher#Forget", "path", path) - var l LogLabels - if l.Parse(path) { - defer w.mutex.Unlock() - w.mutex.Lock() - delete(w.sizes, l) // Clean up sizes entry - _ = w.metrics.DeleteLabelValues(l.Namespace, l.Name, l.UUID, l.Container) - } -} - -func (w *Watcher) Watch() error { - for { - max := 5 - wg := sync.WaitGroup{} - wg.Add(max) - for i := 1; i <= max; i++ { - go w.processNextEvent(&wg) - } - wg.Wait() - } - return nil -} -func (w *Watcher) processNextEvent(wg *sync.WaitGroup) { - defer wg.Done() - e, err := w.watcher.Event() - log.V(3).Info("logwatch.Watcher#Watch", "path", e.Name, "event", e.Op.String()) - switch { - case err == io.EOF: - return - case err != nil: - log.Error(err, "Error retrieving watch event") - case e.Op == fsnotify.Remove: - w.Forget(e.Name) - default: - if err = w.Update(e.Name); err != nil { - log.V(4).Error(err, "Error during Watcher#Update", "path", e.Name, "event", e.Op.String()) - } - } -} - -func (w *Watcher) Update(path string) (err error) { - log.V(3).Info("Watcher#Update", "path", path) - defer func() { - if os.IsNotExist(err) { - w.Forget(path) - err = nil // Not an error if a file disappears - } - if err != nil { - log.Error(err, "error updating metric", "path", path) - } - }() - - var l LogLabels - if !l.Parse(path) { - log.V(3).Info("Unable to parse path for LogLabels. 
returning early from update", "path", path) - return nil - } - counter, err := w.metrics.GetMetricWithLabelValues(l.Namespace, l.Name, l.UUID, l.Container) - if err != nil { - return err - } - stat, err := os.Stat(path) - if err != nil { - return err - } - if stat.IsDir() { - log.V(3).Info("Ignoring path given it is a directory", "path", path) - return nil // Ignore directories - } - defer w.mutex.Unlock() - w.mutex.Lock() - lastSize, size := w.sizes[l], float64(stat.Size()) - log.V(3).Info("Stats", "path", path, "lastSize", lastSize, "size", size) - w.sizes[l] = size - var add float64 - if size > lastSize { - // File has grown, add the difference to the counter. - add = size - lastSize - } else if size < lastSize { - // File truncated, starting over. Add the size. - add = size - } - log.V(3).Info("updated metric", "path", path, "lastsize", lastSize, "currentsize", size, "addedbytes", add) - counter.Add(add) - return nil -} diff --git a/pkg/logwatch/watcher_test.go b/pkg/logwatch/watcher_test.go deleted file mode 100644 index ebcfa8e..0000000 --- a/pkg/logwatch/watcher_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package logwatch - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus" - dto "github.com/prometheus/client_model/go" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/ViaQ/logerr/log" -) - -const ( - logname = "openshift-monitoring_prometheus-k8s-0_9a5888d1-e009-4cc3-bc19-c5543b4b84f7/kube-rbac-proxy-thanos/2.log" - data = "hello\n" -) - -func setup(t *testing.T, initLog func(string)) (watcher *Watcher, path string, labels LogLabels) { - t.Helper() - dir, err := ioutil.TempDir("", t.Name()) - require.NoError(t, err) - t.Cleanup(func() { - log.V(4).Info("Running test cleanup...removing dir", "dir", dir) - _ = os.RemoveAll(dir) - }) - require.NoError(t, err) - path = filepath.Join(dir, logname) - require.True(t, labels.Parse(path)) - os.MkdirAll(filepath.Dir(path), 0700) - if initLog != nil { - initLog(path) - } - watcher, err = New(dir) - require.NoError(t, err) - go watcher.Watch() - t.Cleanup(func() { watcher.Close() }) - return watcher, path, labels -} - -func getCounterValue(c prometheus.Counter) float64 { - m := &dto.Metric{} - if err := c.Write(m); err != nil { - return 0 - } - return m.Counter.GetValue() -} - -func TestWatcherSeesFileChange(t *testing.T) { - w, path, l := setup(t, nil) - - counter, err := w.metrics.GetMetricWithLabelValues(l.Namespace, l.Name, l.UUID, l.Container) - require.NoError(t, err) - - assert.Eventually(t, - func() bool { - require.NoError(t, ioutil.WriteFile(path, []byte(data), 0600)) - return float64(len(data)) == getCounterValue(counter) - }, - time.Second, time.Second/10, "%v != %v", len(data), getCounterValue(counter)) - - assert.NoError(t, os.Remove(path)) - assert.Eventually(t, - func() bool { - counter, err := w.metrics.GetMetricWithLabelValues(l.Namespace, l.Name, l.UUID, l.Container) - require.NoError(t, err) - return getCounterValue(counter) == 0 - }, - time.Second, time.Second/10, "%v != 0", len(data), getCounterValue(counter)) -} -func TestWatcherSeesAndWatchesExistingFiles(t *testing.T) { - w, path, l := setup(t, func(path string) { - writeToFile(t, path) - require.NoError(t, ioutil.WriteFile(path, []byte(data), 0600)) - }) - - counter, err := w.metrics.GetMetricWithLabelValues(l.Namespace, l.Name, l.UUID, l.Container) - require.NoError(t, err) - // assert we see the initial file size - assert.Eventually(t, - func() bool { - v 
:= getCounterValue(counter) - log.V(3).Info("initial size", "counter", v) - return float64(len(data)) == v - }, - time.Second, time.Second/10, "%v != %v", len(data), getCounterValue(counter)) - - writeToFile(t, path) - writeToFile(t, path) - // assert we see the change in the file size - assert.Eventually(t, - func() bool { - v := getCounterValue(counter) - log.V(3).Info("size after write", "counter", v) - return float64(3*len(data)) == v - }, - time.Second, time.Second/10, "%v != %v", 3*len(data), getCounterValue(counter)) -} - -func writeToFile(t *testing.T, path string) { - f, err := os.OpenFile(path, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0600) - require.NoError(t, err) - _, err = f.Write([]byte(data)) - require.NoError(t, err) -} diff --git a/pkg/symnotify/symnotify.go b/pkg/symnotify/symnotify.go deleted file mode 100644 index 0db4e47..0000000 --- a/pkg/symnotify/symnotify.go +++ /dev/null @@ -1,118 +0,0 @@ -// package symnotify provides a file system watcher that notifies events for symlink targets. -package symnotify - -import ( - "errors" - "io" - "io/fs" - "io/ioutil" - "os" - "path/filepath" - - "github.com/ViaQ/logerr/log" - "github.com/fsnotify/fsnotify" -) - -type Event = fsnotify.Event -type Op = fsnotify.Op - -const ( - Create Op = fsnotify.Create - Write = fsnotify.Write - Remove = fsnotify.Remove - Rename = fsnotify.Rename - Chmod = fsnotify.Chmod -) - -// Watcher is like fsnotify.Watcher but also notifies on changes to symlink targets -type Watcher struct { - watcher *fsnotify.Watcher -} - -func NewWatcher() (*Watcher, error) { - w, err := fsnotify.NewWatcher() - return &Watcher{watcher: w}, err -} - -// Event returns the next event or an error. -func (w *Watcher) Event() (e Event, err error) { - var ok bool - select { - case e, ok = <-w.watcher.Events: - case err, ok = <-w.watcher.Errors: - } - if !ok { - err = io.EOF - } - if err != nil { - return Event{}, err - } - log.V(3).Info("event", "path", e.Name, "operation", e.Op.String()) - switch { - case e.Op == Create: - var info os.FileInfo - if info, err = os.Lstat(e.Name); err == nil { - if isSymlink(info) || info.IsDir() { - err = w.Add(e.Name) - } - } - case e.Op == Remove: - err = w.watcher.Remove(e.Name) - case e.Op == Chmod || e.Op == Rename: - var info os.FileInfo - if info, err = os.Lstat(e.Name); err == nil { - if isSymlink(info) { - // Symlink target may have changed. - err = w.watcher.Remove(e.Name) - err = w.watcher.Add(e.Name) - } - } - } - if err != nil { - if !errors.Is(err, fsnotify.ErrNonExistentWatch) { - log.Error(err, "Error retrieving event", "path", e.Name, "operation", e.Op.String()) - } - } - return e, nil -} - -// Remove name from watcher -func (w *Watcher) Remove(name string) error { - log.V(3).Info("stop watching", "path", name) - return w.watcher.Remove(name) -} - -// Add a new directory, file or symlink to be watched. -func (w *Watcher) Add(name string) (err error) { - log.V(3).Info("start watching", "path", name) - if err = w.watcher.Add(name); err != nil { - log.Error(err, "error watching", "path", name) - return err - } - // If name is a directory, scan for existing symlinks and sub-directories to watch. 
- var infos []fs.FileInfo - if infos, err = ioutil.ReadDir(name); err == nil { - for _, info := range infos { - log.V(3).Info("Checking path for more files to watch", "name", name, "subpath", info.Name()) - newName := filepath.Join(name, info.Name()) - switch { - case info.IsDir(): - if e := w.Add(newName); e != nil { - log.Error(e, "Error path to watch", "path", newName) - } - case isSymlink(info): - if e := w.watcher.Add(newName); e != nil { - log.Error(e, "Error for symnotify#Add", "path", newName) - } - } - } - } - return err -} - -// Close watcher -func (w *Watcher) Close() error { return w.watcher.Close() } - -func isSymlink(info os.FileInfo) bool { - return (info.Mode() & os.ModeSymlink) == os.ModeSymlink -} diff --git a/pkg/symnotify/symnotify_benchmark_test.go b/pkg/symnotify/symnotify_benchmark_test.go deleted file mode 100644 index d99f3f5..0000000 --- a/pkg/symnotify/symnotify_benchmark_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package symnotify_test - -import ( - "fmt" - "path/filepath" - "testing" - - "os" - - "math/rand" - - "github.com/log-file-metric-exporter/pkg/symnotify" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// To check for memory growth run this benchmark for a longer time, e.g. -// go test -memprofile=mem.prof -run X -bench BenchmarkStress -benchtime=5m -func BenchmarkStress(b *testing.B) { - dir := b.TempDir() - w, err := symnotify.NewWatcher() - require.NoError(b, err) - require.NoError(b, w.Add(dir)) - files := make([]*os.File, 512) - for i := 0; i < b.N; i++ { - n := rand.Intn(len(files)) - f := files[n] - if f == nil { // Create if file not present - f, err = os.Create(filepath.Join(dir, fmt.Sprintf("log%d", n))) - files[n] = f - require.NoError(b, err) - } else { - p := rand.Intn(100) - if p < 10 { // Remove 10% of the time - _ = f.Close() - require.NoError(b, os.Remove(f.Name())) - files[n] = nil - } else { // Write 90% of the time - f.Write([]byte("hello\n")) - } - } - // Consume the event - e, err := w.Event() - require.NoError(b, err) - assert.Equal(b, f.Name(), e.Name) - } -} diff --git a/pkg/symnotify/symnotify_test.go b/pkg/symnotify/symnotify_test.go deleted file mode 100644 index eeeb2e1..0000000 --- a/pkg/symnotify/symnotify_test.go +++ /dev/null @@ -1,250 +0,0 @@ -package symnotify_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/log-file-metric-exporter/pkg/symnotify" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var Join = filepath.Join - -type Fixture struct { - T *testing.T - Root, Logs, Targets string - Watcher *symnotify.Watcher -} - -func NewFixture(t *testing.T) *Fixture { - t.Helper() - f := &Fixture{T: t} - - var err error - f.Root = t.TempDir() - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(f.Root) }) - - f.Logs = Join(f.Root, "logs") - f.Targets = Join(f.Root, "targets") - for _, dir := range []string{f.Logs, f.Targets} { - require.NoError(t, os.Mkdir(dir, os.ModePerm)) - } - f.Watcher, err = symnotify.NewWatcher() - require.NoError(t, err) - t.Cleanup(func() { f.Watcher.Close() }) - return f -} - -func (f *Fixture) Create(name string) (string, *os.File) { - f.T.Helper() - file, err := os.Create(name) - require.NoError(f.T, err) - f.T.Cleanup(func() { _ = file.Close() }) - return name, file -} - -func (f *Fixture) Mkdir(name string) { - f.T.Helper() - err := os.Mkdir(name, 0777) - require.NoError(f.T, err) -} - -func (f *Fixture) Link(name string) (string, *os.File) { - f.T.Helper() - target, file := 
f.Create(Join(f.Targets, name)) - link := Join(f.Logs, name) - require.NoError(f.T, os.Symlink(target, link)) - return link, file -} - -func (f *Fixture) Event() symnotify.Event { - f.T.Helper() - e, err := f.Watcher.Event() - require.NoError(f.T, err) - return e -} - -func TestCreateWriteRemove(t *testing.T) { - f := NewFixture(t) - assert, require := assert.New(t), require.New(t) - // Create file before starting watcher - log1, f1 := f.Create(Join(f.Logs, "log1")) - require.NoError(f.Watcher.Add(f.Logs)) - // Create log after starting watcher - log2, _ := f.Create(Join(f.Logs, "log2")) - assert.Equal(f.Event(), symnotify.Event{Name: log2, Op: symnotify.Create}) - - _, err := f1.Write([]byte("hello\n")) - assert.NoError(err) - assert.Equal(f.Event(), symnotify.Event{Name: log1, Op: symnotify.Write}) - - assert.NoError(os.Remove(log1)) - assert.Equal(f.Event(), symnotify.Event{Name: log1, Op: symnotify.Remove}) - assert.NoError(os.Remove(log2)) - assert.Equal(f.Event(), symnotify.Event{Name: log2, Op: symnotify.Remove}) -} - -func TestWatchesRealFiles(t *testing.T) { - f := NewFixture(t) - assert, require := assert.New(t), require.New(t) - - // Create file before starting watcher - log1, file1 := f.Create(Join(f.Logs, "log1")) - require.NoError(f.Watcher.Add(f.Logs)) - // Create log after starting watcher - log2, file2 := f.Create(Join(f.Logs, "log2")) - assert.Equal(f.Event(), symnotify.Event{Name: log2, Op: symnotify.Create}) - - // Write to real logs, check Events. - nw, errw := file1.Write([]byte("hello1")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: log1, Op: symnotify.Write}) - } - errt := file1.Truncate(0) - if errt == nil { - assert.Equal(f.Event(), symnotify.Event{Name: log1, Op: symnotify.Write}) - } - nw, errw = file2.Write([]byte("hello2")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: log2, Op: symnotify.Write}) - } - - // Delete and rename real files - newlog1 := Join(f.Logs, "newlog1") - assert.NoError(os.Rename(log1, newlog1)) - assert.Equal(f.Event(), symnotify.Event{Name: log1, Op: symnotify.Rename}) - assert.Equal(f.Event(), symnotify.Event{Name: newlog1, Op: symnotify.Create}) - - assert.NoError(os.Remove(log2)) - assert.Equal(f.Event(), symnotify.Event{Name: log2, Op: symnotify.Remove}) - - nw, errw = file1.Write([]byte("x")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: newlog1, Op: symnotify.Write}) - } -} - -func TestWatchesSymlinks(t *testing.T) { - f := NewFixture(t) - assert, require := assert.New(t), require.New(t) - // Create link before starting watcher - link1, file1 := f.Link("log1") - require.NoError(f.Watcher.Add(f.Logs)) - link2, file2 := f.Link("log2") - assert.Equal(f.Event(), symnotify.Event{Name: link2, Op: symnotify.Create}) - - // Write to files, check Events on links. 
- nw1, errw1 := file1.Write([]byte("hello")) - if errw1 == nil && nw1 > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: link1, Op: symnotify.Write}) - } - errt := file1.Truncate(0) - if errt == nil { - assert.Equal(f.Event(), symnotify.Event{Name: link1, Op: symnotify.Write}) - } - nw2, errw2 := file2.Write([]byte("hello")) - if errw2 == nil && nw2 > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: link2, Op: symnotify.Write}) - } - errch := file2.Chmod(0444) - if errch == nil { - assert.Equal(f.Event(), symnotify.Event{Name: link2, Op: symnotify.Chmod}) - } - - // Rename and remove symlinks - newlink1 := Join(f.Logs, "newlog1") - assert.NoError(os.Rename(link1, newlink1)) - assert.Equal(f.Event(), symnotify.Event{Name: link1, Op: symnotify.Rename}) - assert.Equal(f.Event(), symnotify.Event{Name: newlink1, Op: symnotify.Create}) - - assert.NoError(os.Remove(link2)) - assert.Equal(f.Event(), symnotify.Event{Name: link2, Op: symnotify.Remove}) - - nw3, errw3 := file1.Write([]byte("x")) - if errw3 == nil && nw3 > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: newlink1, Op: symnotify.Write}) - } -} - -func TestWatchesSymlinkTargetsChanged(t *testing.T) { - f := NewFixture(t) - assert, require := assert.New(t), require.New(t) - require.NoError(f.Watcher.Add(f.Logs)) - link, _ := f.Link("log") - assert.Equal(f.Event(), symnotify.Event{Name: link, Op: symnotify.Create}) - - // Replace link target with a new file. - target := Join(f.Targets, "log") - tempname, tempfile := f.Create(Join(f.Targets, "temp")) - assert.NoError(os.Rename(tempname, target)) - assert.Equal(f.Event(), symnotify.Event{Name: link, Op: symnotify.Chmod}) - nw, errw := tempfile.Write([]byte("temp")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: link, Op: symnotify.Write}) - } - got, err := ioutil.ReadFile((link)) - assert.NoError(err) - assert.Equal(string(got), "temp") -} - -func TestCreateRemoveEmpty(t *testing.T) { - f := NewFixture(t) - assert, require := assert.New(t), require.New(t) - require.NoError(f.Watcher.Add(f.Logs)) - name, file := f.Create(Join(f.Logs, "foo")) - _, err := file.Write([]byte("x")) - assert.Equal(f.Event(), symnotify.Event{Name: name, Op: symnotify.Create}) - assert.Equal(f.Event(), symnotify.Event{Name: name, Op: symnotify.Write}) - file.Close() - file, err = os.Open(name) - assert.NoError(err) - file.Close() - require.NoError(os.Remove(name)) - file, err = os.Open(name) - require.Error(err) - assert.Equal(f.Event(), symnotify.Event{Name: name, Op: symnotify.Remove}) -} - -func TestWatchesSubdirectories(t *testing.T) { - f := NewFixture(t) - assert, require := assert.New(t), require.New(t) - - // Create file in subdir before starting watcher - f.Mkdir(Join(f.Logs, "dir1")) - log1, file1 := f.Create(Join(f.Logs, "dir1", "log1")) - log2, file2 := f.Create(Join(f.Logs, "dir1", "log2")) - f.Mkdir(Join(f.Logs, "dir1", "dir2")) - log3, file3 := f.Create(Join(f.Logs, "dir1", "dir2", "log3")) - require.NoError(f.Watcher.Add(f.Logs)) - - // Create log after starting watcher - log4, file4 := f.Create(Join(f.Logs, "dir1", "dir2", "log4")) - assert.Equal(f.Event(), symnotify.Event{Name: log4, Op: symnotify.Create}) - - // Write to logs, check Events. 
- nw, errw := file1.Write([]byte("hello1")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: log1, Op: symnotify.Write}) - } - errt := file1.Truncate(0) - if errt == nil { - assert.Equal(f.Event(), symnotify.Event{Name: log1, Op: symnotify.Write}) - } - nw, errw = file2.Write([]byte("hello2")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: log2, Op: symnotify.Write}) - } - nw, errw = file3.Write([]byte("hello3")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: log3, Op: symnotify.Write}) - } - - nw, errw = file4.Write([]byte("hello4")) - if errw == nil && nw > 0 { - assert.Equal(f.Event(), symnotify.Event{Name: log4, Op: symnotify.Write}) - } -} diff --git a/test/scraper/scraper.go b/test/scraper/scraper.go deleted file mode 100644 index 79fc70c..0000000 --- a/test/scraper/scraper.go +++ /dev/null @@ -1,73 +0,0 @@ -// Package scraper is a simple client to scrape and parse prometheus metrics. -// Intended for testing. -package scraper - -import ( - "crypto/tls" - "fmt" - "net/http" - "time" - - dto "github.com/prometheus/client_model/go" - "github.com/prometheus/common/expfmt" -) - -// Scraper scrapes metrics from a HTTP endpoint -type Scraper struct { - Client *http.Client - Retries int - Interval time.Duration -} - -func New() *Scraper { - return &Scraper{ - Client: &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - }, - }, - Retries: 10, - Interval: time.Second, - } -} - -// Scrape the url, return the parsed metrics. -func (s *Scraper) Scrape(url string) (map[string]*dto.MetricFamily, error) { - resp, err := s.Client.Get(url) - for i := 0; i < s.Retries && err != nil; i++ { - time.Sleep(s.Interval) - resp, err = s.Client.Get(url) - } - if err != nil { - return nil, err - } - if resp.StatusCode/100 != 2 { - return nil, fmt.Errorf("scrape error: %v: %v", resp.Status, url) - } - if resp.Body == nil { - return nil, fmt.Errorf("scrape error: response has no body: %v", url) - } - parser := expfmt.TextParser{} - return parser.TextToMetricFamilies(resp.Body) -} - -func FindMetric(mf *dto.MetricFamily, label, value string) *dto.Metric { - for _, m := range mf.Metric { - for _, lp := range m.Label { - if *lp.Name == label && *lp.Value == value { - return m - } - } - } - return nil -} - -func Labels(m *dto.Metric) map[string]string { - labels := map[string]string{} - for _, kv := range m.Label { - labels[kv.GetName()] = kv.GetValue() - } - return labels -}
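// Closing annotation (illustrative sketch, not part of this patch): the
// proposed gauge in internal/metrics/metrics.go is registered but UpdateMetric
// still contains placeholder code and never writes to it. Assuming a
// hypothetical helper that receives the parsed path and the current on-disk
// size, one way to feed the gauge could look like this; the label order
// follows the GaugeVec declaration in that file.
func (m *Metrics) updateGaugeSketch(p ParsedLogFile, size int64) {
	labels := []string{
		p.Namespace,
		p.Pod,
		p.UUID,
		p.Container,
		strconv.Itoa(p.RestartCount),
		p.Timespamp, // empty for the live log file, the rotation timestamp otherwise
	}
	// Each series tracks the current size of one file; summing all series that
	// share a restartCount gives the total bytes logged in that container
	// generation, as outlined in the comment block in metrics.go.
	m.newMetric.WithLabelValues(labels...).Set(float64(size))
}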