diff --git a/input/postgres/log_pg_read_file.go b/input/postgres/log_pg_read_file.go
index 810836d8..ad96cca2 100644
--- a/input/postgres/log_pg_read_file.go
+++ b/input/postgres/log_pg_read_file.go
@@ -82,7 +82,7 @@ func LogPgReadFile(ctx context.Context, server *state.Server, globalCollectionOp
     for _, fileName := range fileNames {
         if err != nil {
             err = fmt.Errorf("LogFileSql/Scan: %s", err)
-            goto ErrorCleanup
+            return server.LogPrevState, nil, nil, err
         }
         var logData string
         var newOffset int64
@@ -90,21 +90,14 @@ func LogPgReadFile(ctx context.Context, server *state.Server, globalCollectionOp
         err = db.QueryRowContext(ctx, QueryMarkerSQL+logReadSql, fileName, prevOffset).Scan(&newOffset, &logData)
         if err != nil {
             err = fmt.Errorf("LogReadSql/QueryRow: %s", err)
-            goto ErrorCleanup
+            return server.LogPrevState, nil, nil, err
         }
         var logFile state.LogFile
-        logFile, err = state.NewLogFile(nil, fileName)
+        logFile, err = state.NewLogFile(fileName)
         if err != nil {
             err = fmt.Errorf("error initializing log file: %s", err)
-            goto ErrorCleanup
-        }
-
-        _, err := logFile.TmpFile.WriteString(logData)
-        if err != nil {
-            err = fmt.Errorf("Error writing to tempfile: %s", err)
-            logFile.Cleanup(logger)
-            goto ErrorCleanup
+            return server.LogPrevState, nil, nil, err
         }
 
         logReader := bufio.NewReader(strings.NewReader(logData))
@@ -119,11 +112,4 @@ func LogPgReadFile(ctx context.Context, server *state.Server, globalCollectionOp
     psl.ReadFileMarkers = newMarkers
 
     return psl, logFiles, samples, err
-
-ErrorCleanup:
-    for _, logFile := range logFiles {
-        logFile.Cleanup(logger)
-    }
-
-    return server.LogPrevState, nil, nil, err
 }
diff --git a/input/system/rds/logs.go b/input/system/rds/logs.go
index cf9f9d6b..e150772c 100644
--- a/input/system/rds/logs.go
+++ b/input/system/rds/logs.go
@@ -5,9 +5,6 @@ import (
     "context"
     "errors"
     "fmt"
-    "io"
-    "io/ioutil"
-    "os"
     "strings"
     "time"
 
@@ -21,6 +18,16 @@ import (
     "github.com/aws/aws-sdk-go/service/rds"
 )
 
+// Analyze and submit at most the trailing 10 megabytes of the retrieved RDS log file portions
+//
+// This avoids an OOM in two edge cases:
+// 1) When starting the collector, as we always load the last 10,000 lines (which may be very long)
+// 2) When extremely large values are output in a single log event (e.g. query parameters in a DETAIL line)
+//
+// We intentionally throw away data here (and warn the user about it), since the alternative
+// is often a collector crash (due to OOM), which would be less desirable.
+const maxLogParsingSize = 10 * 1024 * 1024
+
 // DownloadLogFiles - Gets log files for an Amazon RDS instance
 func DownloadLogFiles(ctx context.Context, server *state.Server, logger *util.Logger) (state.PersistedLogState, []state.LogFile, []state.PostgresQuerySample, error) {
     var err error
@@ -60,58 +67,52 @@ func DownloadLogFiles(ctx context.Context, server *state.Server, logger *util.Lo
     var newMarkers = make(map[string]string)
 
     for _, rdsLogFile := range resp.DescribeDBLogFiles {
+        var content []byte
         var lastMarker *string
-        var bytesWritten int64
-
         prevMarker, ok := psl.AwsMarkers[*rdsLogFile.LogFileName]
         if ok {
             lastMarker = &prevMarker
         }
 
-        var tmpFile *os.File
-        tmpFile, err = ioutil.TempFile("", "")
-        if err != nil {
-            err = fmt.Errorf("Error allocating tempfile for logs: %s", err)
-            goto ErrorCleanup
-        }
-
         for {
-            var newBytesWritten int
+            var newContent string
             var newMarker *string
             var additionalDataPending bool
-            newBytesWritten, newMarker, additionalDataPending, err = downloadRdsLogFilePortion(rdsSvc, tmpFile, logger, &identifier, rdsLogFile.LogFileName, lastMarker)
+            newContent, newMarker, additionalDataPending, err = downloadRdsLogFilePortion(rdsSvc, logger, &identifier, rdsLogFile.LogFileName, lastMarker)
             if err != nil {
-                util.CleanUpTmpFile(tmpFile, logger)
-                goto ErrorCleanup
+                return server.LogPrevState, nil, nil, err
+            }
+            if len(newContent) > maxLogParsingSize {
+                content = []byte(newContent[len(newContent)-maxLogParsingSize:])
+            } else {
+                // Shift existing data left if needed
+                overflow := len(content) + len(newContent) - maxLogParsingSize
+                if overflow > 0 {
+                    copy(content, content[overflow:])
+                }
+                pos := min(len(content), maxLogParsingSize-len(newContent))
+                // Resize result buffer if needed
+                if pos+len(newContent) > len(content) {
+                    content = append(content, make([]byte, pos+len(newContent)-len(content))...)
+                }
+                copy(content[pos:], newContent)
             }
-
-            bytesWritten += int64(newBytesWritten)
 
             if newMarker != nil {
                 lastMarker = newMarker
             }
-
             if !additionalDataPending {
                 break
             }
         }
 
-        var buf []byte
-        buf, tmpFile, err = readLogFilePortion(tmpFile, bytesWritten, logger)
-        if err != nil {
-            util.CleanUpTmpFile(tmpFile, logger)
-            goto ErrorCleanup
-        }
-
-        fileContent := bufio.NewReader(strings.NewReader(string(buf)))
-        newLogLines, newSamples := logs.ParseAndAnalyzeBuffer(fileContent, linesNewerThan, server)
+        stream := bufio.NewReader(strings.NewReader(string(content)))
+        newLogLines, newSamples := logs.ParseAndAnalyzeBuffer(stream, linesNewerThan, server)
 
-        // Pass responsibility to LogFile for cleaning up the temp file
         var logFile state.LogFile
-        logFile, err = state.NewLogFile(tmpFile, *rdsLogFile.LogFileName)
+        logFile, err = state.NewLogFile(*rdsLogFile.LogFileName)
         if err != nil {
             err = fmt.Errorf("error initializing log file: %s", err)
-            util.CleanUpTmpFile(tmpFile, logger)
-            goto ErrorCleanup
+            return server.LogPrevState, nil, nil, err
         }
 
         logFile.LogLines = append(logFile.LogLines, newLogLines...)
         samples = append(samples, newSamples...)
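The accumulation hunk above keeps only the trailing `maxLogParsingSize` bytes across all downloaded portions. As a sanity check, here is a minimal, runnable sketch of the same invariant, simplified to re-slicing rather than the patch's in-place `copy`/`append`; the names `appendTrailing` and `maxKeep` are illustrative and not part of this change:

```go
package main

import "fmt"

const maxKeep = 8 // illustrative stand-in for maxLogParsingSize (10 MiB in the patch)

// appendTrailing keeps at most the trailing maxKeep bytes of all data
// appended so far, discarding the oldest bytes first.
func appendTrailing(buf []byte, chunk string) []byte {
    if len(chunk) >= maxKeep {
        // The new chunk alone fills the window; keep only its tail.
        return append(buf[:0], chunk[len(chunk)-maxKeep:]...)
    }
    // Drop the oldest bytes so the new chunk fits within the window.
    if overflow := len(buf) + len(chunk) - maxKeep; overflow > 0 {
        buf = buf[overflow:]
    }
    return append(buf, chunk...)
}

func main() {
    var buf []byte
    for _, portion := range []string{"abcde", "fgh", "ijklmnop"} {
        buf = appendTrailing(buf, portion)
    }
    fmt.Printf("%s\n", buf) // => "ijklmnop" (the trailing 8 bytes of the 16 appended)
}
```

The patch's variant instead shifts bytes left in place so one backing buffer can be reused across portions, but the post-condition is the same: after each portion, `content` holds at most the trailing 10 MiB.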
@@ -125,13 +126,6 @@ func DownloadLogFiles(ctx context.Context, server *state.Server, logger *util.Lo
     psl.AwsMarkers = newMarkers
 
     return psl, logFiles, samples, err
-
-ErrorCleanup:
-    for _, logFile := range logFiles {
-        logFile.Cleanup(logger)
-    }
-
-    return server.LogPrevState, nil, nil, err
 }
 
 var DescribeDBClustersErrorCache *util.TTLMap = util.NewTTLMap(10 * 60)
@@ -164,7 +158,7 @@ func getAwsDbInstanceID(config config.ServerConfig, sess *session.Session) (stri
     return *instance.DBInstanceIdentifier, nil
 }
 
-func downloadRdsLogFilePortion(rdsSvc *rds.RDS, tmpFile *os.File, logger *util.Logger, identifier *string, logFileName *string, lastMarker *string) (newBytesWritten int, newMarker *string, additionalDataPending bool, err error) {
+func downloadRdsLogFilePortion(rdsSvc *rds.RDS, logger *util.Logger, identifier *string, logFileName *string, lastMarker *string) (content string, newMarker *string, additionalDataPending bool, err error) {
     var resp *rds.DownloadDBLogFilePortionOutput
     resp, err = rdsSvc.DownloadDBLogFilePortion(&rds.DownloadDBLogFilePortionInput{
         DBInstanceIdentifier: identifier,
@@ -182,71 +176,9 @@ func downloadRdsLogFilePortion(rdsSvc *rds.RDS, tmpFile *os.File, logger *util.L
         return
     }
 
-    if len(*resp.LogFileData) > 0 {
-        newBytesWritten, err = tmpFile.WriteString(*resp.LogFileData)
-        if err != nil {
-            err = fmt.Errorf("Error writing to tempfile: %s", err)
-            return
-        }
-    }
-
+    content = *resp.LogFileData
     newMarker = resp.Marker
     additionalDataPending = *resp.AdditionalDataPending
 
     return
 }
-
-// Analyze and submit at most the trailing 10 megabytes of the retrieved RDS log file portions
-//
-// This avoids an OOM in two edge cases:
-// 1) When starting the collector, as we always load the last 10,000 lines (which may be very long)
-// 2) When extremely large values are output in a single log event (e.g. query parameters in a DETAIL line)
-//
-// We intentionally throw away data here (and warn the user about it), since the alternative
-// is often a collector crash (due to OOM), which would be less desirable.
-const maxLogParsingSize = 10 * 1024 * 1024
-
-func readLogFilePortion(tmpFile *os.File, bytesWritten int64, logger *util.Logger) ([]byte, *os.File, error) {
-    var err error
-    var readSize int64
-
-    exceededMaxParsingSize := bytesWritten > maxLogParsingSize
-    if exceededMaxParsingSize {
-        logger.PrintWarning("RDS log file portion exceeded more than 10 MB of data in 30 second interval, collecting most recent data only (skipping %d bytes)", bytesWritten-maxLogParsingSize)
-        readSize = maxLogParsingSize
-    } else {
-        readSize = bytesWritten
-    }
-
-    // Read the data into memory for analysis
-    _, err = tmpFile.Seek(bytesWritten-readSize, io.SeekStart)
-    if err != nil {
-        return nil, tmpFile, fmt.Errorf("Error seeking tempfile: %s", err)
-    }
-    buf := make([]byte, readSize)
-    _, err = io.ReadFull(tmpFile, buf)
-    if err != nil {
-        return nil, tmpFile, fmt.Errorf("Error reading %d bytes from tempfile: %s", len(buf), err)
-    }
-
-    // If necessary, recreate tempfile with just the data we're analyzing
-    // (this supports the later read of the temp file during the log upload)
-    if exceededMaxParsingSize {
-        truncatedTmpFile, err := ioutil.TempFile("", "")
-        if err != nil {
-            return nil, tmpFile, fmt.Errorf("Error allocating tempfile for logs: %s", err)
-        }
-
-        _, err = truncatedTmpFile.Write(buf)
-        if err != nil {
-            util.CleanUpTmpFile(truncatedTmpFile, logger)
-            return nil, tmpFile, fmt.Errorf("Error writing to tempfile: %s", err)
-        }
-
-        // We succeeded, so remove the previous file and use the new one going forward
-        util.CleanUpTmpFile(tmpFile, logger)
-        tmpFile = truncatedTmpFile
-    }
-
-    return buf, tmpFile, nil
-}
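For context on the loop this accumulation feeds: `DownloadDBLogFilePortion` pages through an RDS log file via markers, passing each response's `Marker` into the next request while `AdditionalDataPending` is set. A minimal standalone sketch of that pattern with aws-sdk-go v1; the instance identifier, file name, and `NumberOfLines` value are placeholders, not taken from this patch:

```go
package main

import (
    "fmt"

    "github.com/aws/aws-sdk-go/aws"
    "github.com/aws/aws-sdk-go/aws/session"
    "github.com/aws/aws-sdk-go/service/rds"
)

func main() {
    sess := session.Must(session.NewSession())
    svc := rds.New(sess)

    var marker *string
    for {
        resp, err := svc.DownloadDBLogFilePortion(&rds.DownloadDBLogFilePortionInput{
            DBInstanceIdentifier: aws.String("my-instance"),          // placeholder
            LogFileName:          aws.String("error/postgresql.log"), // placeholder
            Marker:               marker,
            NumberOfLines:        aws.Int64(10000), // illustrative
        })
        if err != nil {
            fmt.Println("download failed:", err)
            return
        }
        if resp.LogFileData != nil {
            fmt.Printf("got %d bytes\n", len(*resp.LogFileData))
        }
        marker = resp.Marker // feed the next request, as downloadRdsLogFilePortion does
        if resp.AdditionalDataPending == nil || !*resp.AdditionalDataPending {
            break
        }
    }
}
```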
diff --git a/logs/analyze.go b/logs/analyze.go
index e4997046..bf036420 100644
--- a/logs/analyze.go
+++ b/logs/analyze.go
@@ -2266,11 +2266,6 @@ func AnalyzeBackendLogLines(logLines []state.LogLine) (logLinesOut []state.LogLi
         logLinesOut = append(logLinesOut, logLine)
     }
 
-    // Remove log line content. Note that ReplaceSecrets adds it back after secrets have been removed.
-    for idx := range logLinesOut {
-        logLinesOut[idx].Content = ""
-    }
-
     return
 }
diff --git a/logs/analyze_test.go b/logs/analyze_test.go
index 0830a660..d0261976 100644
--- a/logs/analyze_test.go
+++ b/logs/analyze_test.go
@@ -4328,6 +4328,9 @@ var tests = []testpair{
 func TestAnalyzeLogLines(t *testing.T) {
     for _, pair := range tests {
         l, s := logs.AnalyzeLogLines(pair.logLinesIn)
+        for idx := range l {
+            l[idx].Content = ""
+        }
 
         cfg := pretty.CompareConfig
         cfg.SkipZeroFields = true
@@ -4387,6 +4390,9 @@ func TestAnalyzeLogLinesHeroku(t *testing.T) {
     t.Setenv("PORT", "dummy")
     for _, pair := range testsHeroku {
         l, s := logs.AnalyzeLogLines(pair.logLinesIn)
+        for idx := range l {
+            l[idx].Content = ""
+        }
 
         cfg := pretty.CompareConfig
         cfg.SkipZeroFields = true
diff --git a/logs/debug.go b/logs/debug.go
index b665d1bf..b608ee08 100644
--- a/logs/debug.go
+++ b/logs/debug.go
@@ -12,7 +12,7 @@ import (
     "github.com/pganalyze/collector/state"
 )
 
-func PrintDebugInfo(logFileContents string, logLines []state.LogLine, samples []state.PostgresQuerySample) {
+func PrintDebugInfo(logLines []state.LogLine, samples []state.PostgresQuerySample) {
     fmt.Printf("log lines: %d, query samples: %d\n", len(logLines), len(samples))
     groups := map[pganalyze_collector.LogLineInformation_LogClassification]int{}
     unclassifiedLogLines := []state.LogLine{}
@@ -35,15 +35,13 @@ func PrintDebugInfo(logFileContents string, logLines []state.LogLine, samples []
     if len(unclassifiedLogLines) > 0 {
         fmt.Printf("\nUnclassified log lines:\n")
         for _, logLine := range unclassifiedLogLines {
-            fmt.Printf("%s\n", logFileContents[logLine.ByteStart:logLine.ByteEnd])
-            fmt.Printf("  Level: %s\n", logLine.LogLevel)
-            fmt.Printf("  Content: %#v\n", logFileContents[logLine.ByteContentStart:logLine.ByteEnd])
+            fmt.Printf("  %s: %s", logLine.LogLevel, logLine.Content)
             fmt.Printf("---\n")
         }
     }
 }
 
-func PrintDebugLogLines(logFileContents string, logLines []state.LogLine, classifications map[pganalyze_collector.LogLineInformation_LogClassification]bool) {
+func PrintDebugLogLines(logLines []state.LogLine, classifications map[pganalyze_collector.LogLineInformation_LogClassification]bool) {
     fmt.Println("\nParsed log lines:")
     linesById := make(map[uuid.UUID]*state.LogLine)
     for _, logLine := range logLines {
@@ -63,15 +61,13 @@ func PrintDebugLogLines(logFileContents string, logLines []state.LogLine, classi
         if err != nil {
             panic(err)
         }
-        fmt.Printf("%s\n", logFileContents[logLine.ByteStart:logLine.ByteEnd])
-        fmt.Printf("  Level: %s\n", logLine.LogLevel)
+        fmt.Printf("  %s: %s", logLine.LogLevel, logLine.Content)
         if logLine.ParentUUID == uuid.Nil {
             fmt.Printf("  Classification: %s (%d)\n", logLine.Classification, logLine.Classification)
         }
         if len(logLine.Details) > 0 {
             fmt.Printf("  Details: %s\n", detailsStr)
         }
-        fmt.Printf("  Content: %#v\n", logFileContents[logLine.ByteContentStart:logLine.ByteEnd])
         fmt.Printf("---\n")
     }
 }
diff --git a/logs/replace.go b/logs/replace.go
index c28d8c35..395d7e11 100644
--- a/logs/replace.go
+++ b/logs/replace.go
@@ -9,7 +9,7 @@ import (
 
 const replacement = "[redacted]"
 
-func ReplaceSecrets(input []byte, logLines []state.LogLine, filterLogSecret []state.LogSecretKind) {
+func ReplaceSecrets(logLines []state.LogLine, filterLogSecret []state.LogSecretKind) {
     filterUnidentified := false
     for _, k := range filterLogSecret {
         if k == state.UnidentifiedLogSecret {
@@ -20,10 +20,10 @@ func ReplaceSecrets(input []byte, logLines []state.LogLine, filterLogSecret []st
         if filterUnidentified && logLines[idx].Classification == 0 {
             logLines[idx].Content = replacement + "\n"
         } else {
-            content := input[logLines[idx].ByteContentStart:logLines[idx].ByteEnd]
             sort.Slice(logLine.SecretMarkers, func(i, j int) bool {
                 return logLine.SecretMarkers[i].ByteStart < logLine.SecretMarkers[j].ByteEnd
             })
+            content := []byte(logLine.Content)
             bytesChecked := 0
             offset := 0
             for _, m := range logLine.SecretMarkers {
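The per-line redaction that `ReplaceSecrets` now performs directly on `logLine.Content` can be pictured with a simplified sketch: splice `[redacted]` over each marked byte range of a single line. The `LogSecretMarker` shape mirrors how the markers are used above, but this is an illustrative reimplementation, not the collector's exact code (which additionally tracks `bytesChecked`/`offset` state, omitted here):

```go
package main

import (
    "fmt"
    "sort"
)

const replacement = "[redacted]"

// LogSecretMarker marks a secret as a byte range within one line's content.
type LogSecretMarker struct {
    ByteStart int
    ByteEnd   int
}

// redactLine replaces each marked range in content with the replacement
// token. Markers are assumed to be non-overlapping.
func redactLine(content string, markers []LogSecretMarker) string {
    sort.Slice(markers, func(i, j int) bool {
        return markers[i].ByteStart < markers[j].ByteStart
    })
    var out []byte
    last := 0
    for _, m := range markers {
        out = append(out, content[last:m.ByteStart]...)
        out = append(out, replacement...)
        last = m.ByteEnd
    }
    out = append(out, content[last:]...)
    return string(out)
}

func main() {
    line := "connection authorized: user=alice password=hunter2\n"
    markers := []LogSecretMarker{{ByteStart: 43, ByteEnd: 50}} // covers "hunter2"
    fmt.Print(redactLine(line, markers))
    // => connection authorized: user=alice password=[redacted]
}
```

Since each line now carries its own content, the redaction no longer needs the whole file's bytes plus global offsets, which is what made the `input []byte` parameter removable.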
diff --git a/logs/replace_test.go b/logs/replace_test.go
index 2478b1d2..afd50b06 100644
--- a/logs/replace_test.go
+++ b/logs/replace_test.go
@@ -57,7 +57,7 @@ func TestReplaceSecrets(t *testing.T) {
         server := state.MakeServer(config.ServerConfig{}, false)
         server.LogParser = logs.NewLogParser(logs.LogPrefixAmazonRds, nil, false)
         logLines, _ := logs.ParseAndAnalyzeBuffer(reader, time.Time{}, server)
-        logs.ReplaceSecrets([]byte(pair.input), logLines, state.ParseFilterLogSecret(pair.filterLogSecret))
+        logs.ReplaceSecrets(logLines, state.ParseFilterLogSecret(pair.filterLogSecret))
 
         cfg := pretty.CompareConfig
         cfg.SkipZeroFields = true
diff --git a/logs/stream/stream.go b/logs/stream/stream.go
index 4c6f5e0c..59155cee 100644
--- a/logs/stream/stream.go
+++ b/logs/stream/stream.go
@@ -159,20 +159,14 @@ func isAdditionalLineLevel(str pganalyze_collector.LogLineInformation_LogLevel)
     return false
 }
 
-// writeTmpLogFile - Setup temporary file that will be used for encryption
-func writeTmpLogFile(readyLogLines []state.LogLine, logger *util.Logger) (state.LogFile, error) {
-    logFile, err := state.NewLogFile(nil, "")
+func createLogFile(readyLogLines []state.LogLine, logger *util.Logger) (state.LogFile, error) {
+    logFile, err := state.NewLogFile("")
     if err != nil {
         return state.LogFile{}, fmt.Errorf("could not initialize log file: %s", err)
     }
 
     currentByteStart := int64(0)
     for idx, logLine := range readyLogLines {
-        _, err = logFile.TmpFile.WriteString(logLine.Content)
-        if err != nil {
-            logFile.Cleanup(logger)
-            return logFile, err
-        }
         logLine.ByteStart = currentByteStart
         logLine.ByteContentStart = currentByteStart
         logLine.ByteEnd = currentByteStart + int64(len(logLine.Content))
@@ -307,7 +301,7 @@ func AnalyzeStreamInGroups(logLines []state.LogLine, now time.Time, server *stat
         }
     }
 
-    logFile, err := writeTmpLogFile(analyzableLogLines, logger)
+    logFile, err := createLogFile(analyzableLogLines, logger)
     if err != nil {
         return state.TransientLogState{}, state.LogFile{}, logLines, err
     }
diff --git a/logs/stream/stream_test.go b/logs/stream/stream_test.go
index 47c83c71..94940d62 100644
--- a/logs/stream/stream_test.go
+++ b/logs/stream/stream_test.go
@@ -1,7 +1,6 @@
 package stream_test
 
 import (
-    "io/ioutil"
     "log"
     "os"
     "sort"
@@ -364,16 +363,12 @@ func TestAnalyzeStreamInGroups(t *testing.T) {
         server := state.MakeServer(config.ServerConfig{}, false)
         TransientLogState, logFile, tooFreshLogLines, err := stream.AnalyzeStreamInGroups(pair.logLines, now, server, &util.Logger{Destination: log.New(os.Stderr, "", log.LstdFlags)})
         logFileContent := ""
-        if logFile.TmpFile != nil {
-            dat, err := ioutil.ReadFile(logFile.TmpFile.Name())
-            if err != nil {
-                t.Errorf("Error reading temporary log file: %s", err)
-            }
-            logFileContent = string(dat)
+        for idx, logLine := range logFile.LogLines {
+            logFileContent += logLine.Content
+            logFile.LogLines[idx].Content = ""
         }
 
         TransientLogState.CollectedAt = time.Time{} // Avoid comparing against time.Now()
-        logFile.TmpFile = nil                       // Avoid comparing against tempfile
         logFile.UUID = uuid.UUID{}                  // Avoid comparing against a generated UUID
 
         cfg := pretty.CompareConfig
diff --git a/main.go b/main.go
index 7c60bd76..e521de2d 100644
--- a/main.go
+++ b/main.go
@@ -431,9 +431,7 @@ func main() {
             fmt.Printf("ERROR: %s\n", err)
             return
         }
-        content := string(contentBytes)
-        reader := strings.NewReader(content)
-        logReader := logs.NewMaybeHerokuLogReader(reader)
+        logReader := logs.NewMaybeHerokuLogReader(strings.NewReader(string(contentBytes)))
         server := state.MakeServer(config.ServerConfig{}, false)
         tz, err := time.LoadLocation(analyzeLogfileTz)
         if err != nil {
@@ -447,7 +445,7 @@ func main() {
         server.LogParser = logs.NewLogParser(analyzeLogfilePrefix, tz, false)
 
         logLines, samples := logs.ParseAndAnalyzeBuffer(logReader, time.Time{}, server)
-        logs.PrintDebugInfo(content, logLines, samples)
+        logs.PrintDebugInfo(logLines, samples)
         if analyzeDebugClassifications != "" {
             classifications := strings.Split(analyzeDebugClassifications, ",")
             classMap := make(map[pganalyze_collector.LogLineInformation_LogClassification]bool)
@@ -463,7 +461,7 @@ func main() {
                 classInt := int32(classVal)
                 classMap[pganalyze_collector.LogLineInformation_LogClassification(classInt)] = true
             }
-            logs.PrintDebugLogLines(content, logLines, classMap)
+            logs.PrintDebugLogLines(logLines, classMap)
         }
         return
     }
@@ -474,11 +472,9 @@
         fmt.Printf("ERROR: %s\n", err)
         return
     }
-    content := string(contentBytes)
-    reader := strings.NewReader(content)
-    logReader := logs.NewMaybeHerokuLogReader(reader)
+    logReader := logs.NewMaybeHerokuLogReader(strings.NewReader(string(contentBytes)))
     logLines, _ := logs.ParseAndAnalyzeBuffer(logReader, time.Time{}, state.MakeServer(config.ServerConfig{}, false))
-    logs.ReplaceSecrets(contentBytes, logLines, state.ParseFilterLogSecret(filterLogSecret))
+    logs.ReplaceSecrets(logLines, state.ParseFilterLogSecret(filterLogSecret))
     output := ""
     for _, logLine := range logLines {
         output += logLine.Content
diff --git a/runner/logs.go b/runner/logs.go
index ab356b84..4b46fee5 100644
--- a/runner/logs.go
+++ b/runner/logs.go
@@ -3,7 +3,6 @@ package runner
 import (
     "context"
     "fmt"
-    "io/ioutil"
     "strings"
     "sync"
     "time"
@@ -151,7 +150,6 @@ func downloadLogsForServer(ctx context.Context, server *state.Server, globalColl
         return server.LogPrevState, false, err
     }
     transientLogState := state.TransientLogState{CollectedAt: time.Now()}
-    defer transientLogState.Cleanup(logger)
 
     var newLogState state.PersistedLogState
     newLogState, transientLogState.LogFiles, transientLogState.QuerySamples, err = system.DownloadLogFiles(ctx, server, globalCollectionOpts, logger)
@@ -242,7 +240,6 @@ func processLogStream(ctx context.Context, server *state.Server, logLines []stat
         logger.PrintError("%s", err)
         return tooFreshLogLines
     }
-    defer transientLogState.Cleanup(logger)
 
     transientLogState.LogFiles = []state.LogFile{logFile}
 
@@ -332,24 +329,16 @@ func postprocessAndSendLogs(ctx context.Context, server *state.Server, globalCol
 
     for idx := range transientLogState.LogFiles {
         logFile := &transientLogState.LogFiles[idx]
+        logFile.ByteSize = int64(logFile.LogLines[len(logFile.LogLines)-1].ByteEnd)
         if len(logFile.FilterLogSecret) > 0 {
-            content, err := ioutil.ReadFile(logFile.TmpFile.Name())
-            if err != nil {
-                return err
-            }
-            logFile.ByteSize = int64(len(content))
-            logs.ReplaceSecrets(content, logFile.LogLines, logFile.FilterLogSecret)
+            logs.ReplaceSecrets(logFile.LogLines, logFile.FilterLogSecret)
         }
     }
 
     if globalCollectionOpts.DebugLogs {
         logger.PrintInfo("Would have sent log state:\n")
         for _, logFile := range transientLogState.LogFiles {
-            content, err := ioutil.ReadFile(logFile.TmpFile.Name())
-            if err != nil {
-                return err
-            }
-
-            logs.PrintDebugInfo(string(content), logFile.LogLines, transientLogState.QuerySamples)
+            logs.PrintDebugInfo(logFile.LogLines, transientLogState.QuerySamples)
         }
         return nil
     }
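Why `logFile.ByteSize` can now come from the last line: `createLogFile` (earlier in this patch) lays the lines out contiguously over the concatenated contents, so the final line's `ByteEnd` equals the total size. A small sketch of that invariant, with types abridged from the patch and the `logLine` struct purely illustrative:

```go
package main

import "fmt"

type logLine struct {
    Content            string
    ByteStart, ByteEnd int64
}

func main() {
    lines := []logLine{
        {Content: "LOG: checkpoint starting\n"},
        {Content: "LOG: checkpoint complete\n"},
    }
    // Assign contiguous offsets, as createLogFile does.
    var cur int64
    for i := range lines {
        lines[i].ByteStart = cur
        lines[i].ByteEnd = cur + int64(len(lines[i].Content))
        cur = lines[i].ByteEnd
    }
    total := 0
    for _, l := range lines {
        total += len(l.Content)
    }
    // The last ByteEnd is the full concatenated size: both print 50.
    fmt.Println(lines[len(lines)-1].ByteEnd, total)
}
```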
diff --git a/state/logs.go b/state/logs.go
index bf9bb7f8..7f79740f 100644
--- a/state/logs.go
+++ b/state/logs.go
@@ -2,15 +2,12 @@ package state
 
 import (
     "fmt"
-    "io/ioutil"
-    "os"
     "strings"
     "time"
 
     "github.com/google/uuid"
     "github.com/pganalyze/collector/config"
     "github.com/pganalyze/collector/output/pganalyze_collector"
-    "github.com/pganalyze/collector/util"
 )
 
 type GrantLogs struct {
@@ -64,8 +61,6 @@ type LogFile struct {
     ByteSize     int64
     OriginalName string
 
-    TmpFile *os.File
-
     FilterLogSecret []LogSecretKind
 }
 
@@ -183,33 +178,13 @@ type LogLine struct {
     SecretMarkers []LogSecretMarker
 }
 
-func NewLogFile(tmpFile *os.File, originalName string) (LogFile, error) {
-    var err error
-    if tmpFile == nil {
-        tmpFile, err = ioutil.TempFile("", "")
-        if err != nil {
-            return LogFile{}, fmt.Errorf("error allocating tempfile for logs: %s", err)
-        }
-    }
+func NewLogFile(originalName string) (LogFile, error) {
     uuid, err := uuid.NewV7()
     if err != nil {
         return LogFile{}, fmt.Errorf("error generating log file UUID: %s", err)
     }
     return LogFile{
         UUID:         uuid,
-        TmpFile:      tmpFile,
         OriginalName: originalName,
     }, nil
 }
-
-func (logFile *LogFile) Cleanup(logger *util.Logger) {
-    if logFile.TmpFile != nil {
-        util.CleanUpTmpFile(logFile.TmpFile, logger)
-    }
-}
-
-func (ls *TransientLogState) Cleanup(logger *util.Logger) {
-    for _, logFile := range ls.LogFiles {
-        logFile.Cleanup(logger)
-    }
-}
diff --git a/util/tmp_file.go b/util/tmp_file.go
deleted file mode 100644
index f4c0715f..00000000
--- a/util/tmp_file.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package util
-
-import (
-    "os"
-)
-
-// Clean up a created temporary file, reporting any errors to the log
-func CleanUpTmpFile(tmpFile *os.File, logger *Logger) {
-    err := tmpFile.Close()
-    if err != nil {
-        logger.PrintError("Failed to close temporary file \"%s\": %s", tmpFile.Name(), err)
-    }
-
-    err = os.Remove(tmpFile.Name())
-    if err != nil {
-        logger.PrintError("Failed to delete temporary file \"%s\": %s", tmpFile.Name(), err)
-    }
-}