diff --git a/Makefile b/Makefile index ebea372..d5c8c2a 100644 --- a/Makefile +++ b/Makefile @@ -4,10 +4,10 @@ TOOLS_BIN = tools/bin NPM_BIN = node_modules/.bin OAPI_CODEGEN = $(TOOLS_BIN)/oapi-codegen -SWAGGER_CLI = $(NPM_BIN)/swagger-cli +REDOCLY_CLI = $(NPM_BIN)/redocly NPM_PKG_SPECS = \ - @apidevtools/swagger-cli@^4.0.4 + @redocly/cli@latest ifeq ($(CI),) GO_BUILD_FLAGS = @@ -31,16 +31,16 @@ test-cover: .PHONY: generate # Generates client api -generate: $(SWAGGER_CLI) $(OAPI_CODEGEN) - $(SWAGGER_CLI) bundle ../TidepoolApi/reference/summary.v1.yaml -o ./spec/summary.v1.yaml -t yaml +generate: $(REDOCLY_CLI) $(OAPI_CODEGEN) + $(REDOCLY_CLI) bundle ../TidepoolApi/reference/summary.v1.yaml -o ./spec/summary.v1.yaml $(OAPI_CODEGEN) -package=api -generate=types spec/summary.v1.yaml > clients/summary/types.go $(OAPI_CODEGEN) -package=api -generate=client spec/summary.v1.yaml > clients/summary/client.go cd clients/summary && go generate ./... $(OAPI_CODEGEN): - GOBIN=$(shell pwd)/$(TOOLS_BIN) go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@v2.3.0 + GOBIN=$(shell pwd)/$(TOOLS_BIN) go install github.com/oapi-codegen/oapi-codegen/v2/cmd/oapi-codegen@v2.4.1 -$(SWAGGER_CLI): npm-tools +$(REDOCLY_CLI): npm-tools .PHONY: npm-tools npm-tools: diff --git a/clients/summary/client.go b/clients/summary/client.go index 1fdaf45..5f0f990 100644 --- a/clients/summary/client.go +++ b/clients/summary/client.go @@ -195,7 +195,7 @@ type ClientWithResponsesInterface interface { type GetSummaryResponse struct { Body []byte HTTPResponse *http.Response - JSON200 *Summary + JSON200 *SummaryV5 } // Status returns HTTPResponse.Status @@ -238,7 +238,7 @@ func ParseGetSummaryResponse(rsp *http.Response) (*GetSummaryResponse, error) { switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest Summary + var dest SummaryV5 if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } diff --git a/clients/summary/types.go b/clients/summary/types.go index 554b4ef..aa71ead 100644 --- a/clients/summary/types.go +++ b/clients/summary/types.go @@ -14,713 +14,239 @@ const ( SessionTokenScopes = "sessionToken.Scopes" ) -// BGMBucketData Series of counters which represent one hour of a users data -type BGMBucketData struct { - LastRecordTime *time.Time `json:"lastRecordTime,omitempty"` +// BgmperiodsV5 A map to each supported BGM summary period +type BgmperiodsV5 map[string]GlucoseperiodV5 - // TimeInHighRecords Counter of records in high glucose range - TimeInHighRecords *int `json:"timeInHighRecords,omitempty"` +// CgmperiodsV5 A map to each supported CGM summary period +type CgmperiodsV5 map[string]GlucoseperiodV5 - // TimeInLowRecords Counter of records in low glucose range - TimeInLowRecords *int `json:"timeInLowRecords,omitempty"` +// ConfigV1 Summary schema version and calculation configuration +type ConfigV1 struct { + // HighGlucoseThreshold Threshold used for determining if a value is high + HighGlucoseThreshold float64 `json:"highGlucoseThreshold"` - // TimeInTargetRecords Counter of records in target glucose range - TimeInTargetRecords *int `json:"timeInTargetRecords,omitempty"` + // LowGlucoseThreshold Threshold used for determining if a value is low + LowGlucoseThreshold float64 `json:"lowGlucoseThreshold"` - // TimeInVeryHighRecords Counter of records in very high glucose range - TimeInVeryHighRecords *int `json:"timeInVeryHighRecords,omitempty"` + // SchemaVersion Summary schema version + SchemaVersion int `json:"schemaVersion"` - // 
TimeInVeryLowRecords Counter of records in very low glucose range - TimeInVeryLowRecords *int `json:"timeInVeryLowRecords,omitempty"` + // VeryHighGlucoseThreshold Threshold used for determining if a value is very high + VeryHighGlucoseThreshold float64 `json:"veryHighGlucoseThreshold"` - // TotalGlucose Total value of all glucose records - TotalGlucose float64 `json:"totalGlucose"` + // VeryLowGlucoseThreshold Threshold used for determining if a value is very low + VeryLowGlucoseThreshold float64 `json:"veryLowGlucoseThreshold"` } -// BGMPeriod Summary of a specific BGM time period (currently: 1d, 7d, 14d, 30d) -type BGMPeriod struct { +// ContinuousperiodV5 defines model for continuousperiod.v5. +type ContinuousperiodV5 struct { // AverageDailyRecords Average daily readings AverageDailyRecords *float64 `json:"averageDailyRecords,omitempty"` - // AverageDailyRecordsDelta Difference between the averageDailyRecords in this period and version in the opposite offset - AverageDailyRecordsDelta *float64 `json:"averageDailyRecordsDelta,omitempty"` - - // AverageGlucoseMmol Average Glucose of records in this period - AverageGlucoseMmol *float64 `json:"averageGlucoseMmol,omitempty"` - - // AverageGlucoseMmolDelta Difference between the averageGlucose in this period and the other offset version - AverageGlucoseMmolDelta *float64 `json:"averageGlucoseMmolDelta,omitempty"` - HasAverageDailyRecords bool `json:"hasAverageDailyRecords"` - HasAverageGlucoseMmol bool `json:"hasAverageGlucoseMmol"` - HasTimeInAnyHighPercent bool `json:"hasTimeInAnyHighPercent"` - HasTimeInAnyHighRecords bool `json:"hasTimeInAnyHighRecords"` - HasTimeInAnyLowPercent bool `json:"hasTimeInAnyLowPercent"` - HasTimeInAnyLowRecords bool `json:"hasTimeInAnyLowRecords"` - HasTimeInExtremeHighPercent bool `json:"hasTimeInExtremeHighPercent"` - HasTimeInExtremeHighRecords bool `json:"hasTimeInExtremeHighRecords"` - HasTimeInHighPercent bool `json:"hasTimeInHighPercent"` - HasTimeInHighRecords bool `json:"hasTimeInHighRecords"` - HasTimeInLowPercent bool `json:"hasTimeInLowPercent"` - HasTimeInLowRecords bool `json:"hasTimeInLowRecords"` - HasTimeInTargetPercent bool `json:"hasTimeInTargetPercent"` - HasTimeInTargetRecords bool `json:"hasTimeInTargetRecords"` - HasTimeInVeryHighPercent bool `json:"hasTimeInVeryHighPercent"` - HasTimeInVeryHighRecords bool `json:"hasTimeInVeryHighRecords"` - HasTimeInVeryLowPercent bool `json:"hasTimeInVeryLowPercent"` - HasTimeInVeryLowRecords bool `json:"hasTimeInVeryLowRecords"` - HasTotalRecords bool `json:"hasTotalRecords"` - - // TimeInAnyHighPercent Percentage of time spent in Any high glucose range - TimeInAnyHighPercent *float64 `json:"timeInAnyHighPercent,omitempty"` - - // TimeInAnyHighPercentDelta Difference between the timeInAnyHighPercent in this period and version in the opposite offset - TimeInAnyHighPercentDelta *float64 `json:"timeInAnyHighPercentDelta,omitempty"` - - // TimeInAnyHighRecords Counter of records in Any high glucose range - TimeInAnyHighRecords *int `json:"timeInAnyHighRecords,omitempty"` - - // TimeInAnyHighRecordsDelta Difference between the timeInAnyHighRecords in this period and version in the opposite offset - TimeInAnyHighRecordsDelta *int `json:"timeInAnyHighRecordsDelta,omitempty"` - - // TimeInAnyLowPercent Percentage of time spent in Any low glucose range - TimeInAnyLowPercent *float64 `json:"timeInAnyLowPercent,omitempty"` - - // TimeInAnyLowPercentDelta Difference between the timeInAnyLowPercent in this period and version in the opposite offset - 
TimeInAnyLowPercentDelta *float64 `json:"timeInAnyLowPercentDelta,omitempty"` - - // TimeInAnyLowRecords Counter of records in Any low glucose range - TimeInAnyLowRecords *int `json:"timeInAnyLowRecords,omitempty"` - - // TimeInAnyLowRecordsDelta Difference between the timeInAnyLowRecords in this period and version in the opposite offset - TimeInAnyLowRecordsDelta *int `json:"timeInAnyLowRecordsDelta,omitempty"` - - // TimeInExtremeHighPercent Percentage of time spent in extreme high glucose range - TimeInExtremeHighPercent *float64 `json:"timeInExtremeHighPercent,omitempty"` - - // TimeInExtremeHighPercentDelta Difference between the timeInExtremeHighPercent in this period and version in the opposite offset - TimeInExtremeHighPercentDelta *float64 `json:"timeInExtremeHighPercentDelta,omitempty"` - - // TimeInExtremeHighRecords Counter of records in extreme high glucose range - TimeInExtremeHighRecords *int `json:"timeInExtremeHighRecords,omitempty"` - - // TimeInExtremeHighRecordsDelta Difference between the timeInExtremeHighRecords in this period and version in the opposite offset - TimeInExtremeHighRecordsDelta *int `json:"timeInExtremeHighRecordsDelta,omitempty"` - - // TimeInHighPercent Percentage of time spent in high glucose range - TimeInHighPercent *float64 `json:"timeInHighPercent,omitempty"` - - // TimeInHighPercentDelta Difference between the timeInHighPercent in this period and version in the opposite offset - TimeInHighPercentDelta *float64 `json:"timeInHighPercentDelta,omitempty"` - - // TimeInHighRecords Counter of records in high glucose range - TimeInHighRecords *int `json:"timeInHighRecords,omitempty"` + // Deferred Metrics for a particular range of glucose values + Deferred GlucoserangeV5 `json:"deferred,omitempty"` - // TimeInHighRecordsDelta Difference between the timeInHighRecords in this period and version in the opposite offset - TimeInHighRecordsDelta *int `json:"timeInHighRecordsDelta,omitempty"` + // Realtime Metrics for a particular range of glucose values + Realtime GlucoserangeV5 `json:"realtime,omitempty"` - // TimeInLowPercent Percentage of time spent in low glucose range - TimeInLowPercent *float64 `json:"timeInLowPercent,omitempty"` - - // TimeInLowPercentDelta Difference between the timeInLowPercent in this period and version in the opposite offset - TimeInLowPercentDelta *float64 `json:"timeInLowPercentDelta,omitempty"` - - // TimeInLowRecords Counter of records in low glucose range - TimeInLowRecords *int `json:"timeInLowRecords,omitempty"` - - // TimeInLowRecordsDelta Difference between the timeInLowRecords in this period and version in the opposite offset - TimeInLowRecordsDelta *int `json:"timeInLowRecordsDelta,omitempty"` - - // TimeInTargetPercent Percentage of time spent in target glucose range - TimeInTargetPercent *float64 `json:"timeInTargetPercent,omitempty"` - - // TimeInTargetPercentDelta Difference between the timeInTargetPercent in this period and version in the opposite offset - TimeInTargetPercentDelta *float64 `json:"timeInTargetPercentDelta,omitempty"` - - // TimeInTargetRecords Counter of records in target glucose range - TimeInTargetRecords *int `json:"timeInTargetRecords,omitempty"` - - // TimeInTargetRecordsDelta Difference between the timeInTargetRecords in this period and version in the opposite offset - TimeInTargetRecordsDelta *int `json:"timeInTargetRecordsDelta,omitempty"` - - // TimeInVeryHighPercent Percentage of time spent in very high glucose range - TimeInVeryHighPercent *float64 `json:"timeInVeryHighPercent,omitempty"` 
- - // TimeInVeryHighPercentDelta Difference between the timeInVeryHighPercent in this period and version in the opposite offset - TimeInVeryHighPercentDelta *float64 `json:"timeInVeryHighPercentDelta,omitempty"` - - // TimeInVeryHighRecords Counter of records in very high glucose range - TimeInVeryHighRecords *int `json:"timeInVeryHighRecords,omitempty"` - - // TimeInVeryHighRecordsDelta Difference between the timeInVeryHighRecords in this period and version in the opposite offset - TimeInVeryHighRecordsDelta *int `json:"timeInVeryHighRecordsDelta,omitempty"` - - // TimeInVeryLowPercent Percentage of time spent in very low glucose range - TimeInVeryLowPercent *float64 `json:"timeInVeryLowPercent,omitempty"` - - // TimeInVeryLowPercentDelta Difference between the timeInVeryLowPercent in this period and version in the opposite offset - TimeInVeryLowPercentDelta *float64 `json:"timeInVeryLowPercentDelta,omitempty"` - - // TimeInVeryLowRecords Counter of records in very low glucose range - TimeInVeryLowRecords *int `json:"timeInVeryLowRecords,omitempty"` - - // TimeInVeryLowRecordsDelta Difference between the timeInVeryLowRecords in this period and version in the opposite offset - TimeInVeryLowRecordsDelta *int `json:"timeInVeryLowRecordsDelta,omitempty"` - - // TotalRecords Counter of records - TotalRecords *int `json:"totalRecords,omitempty"` - - // TotalRecordsDelta Difference between the totalRecords in this period and version in the opposite offset - TotalRecordsDelta *int `json:"totalRecordsDelta,omitempty"` + // Total Metrics for a particular range of glucose values + Total GlucoserangeV5 `json:"total,omitempty"` } -// BGMPeriods A map to each supported BGM summary period -type BGMPeriods map[string]BGMPeriod - -// BGMStats A summary of a users recent BGM glucose values -type BGMStats struct { - // Buckets Rotating list containing the stats for each currently tracked hour in order - Buckets []Bucket `json:"buckets,omitempty"` +// ContinuousperiodsV5 A map to each supported Continuous summary period +type ContinuousperiodsV5 map[string]ContinuousperiodV5 - // OffsetPeriods A map to each supported BGM summary period - OffsetPeriods BGMPeriods `json:"offsetPeriods,omitempty"` +// ContinuousrangesV5 continuous ranges +type ContinuousrangesV5 struct { + // Deferred Metrics for a particular range of glucose values + Deferred GlucoserangeV5 `json:"deferred,omitempty"` - // Periods A map to each supported BGM summary period - Periods BGMPeriods `json:"periods,omitempty"` + // Realtime Metrics for a particular range of glucose values + Realtime GlucoserangeV5 `json:"realtime,omitempty"` - // TotalHours Total hours represented in the hourly stats - TotalHours int `json:"totalHours"` + // Total Metrics for a particular range of glucose values + Total GlucoserangeV5 `json:"total,omitempty"` } -// Bucket bucket containing an hour of bgm or cgm aggregations -type Bucket struct { - Data *Bucket_Data `json:"data,omitempty"` - Date time.Time `json:"date"` - LastRecordTime *time.Time `json:"lastRecordTime,omitempty"` -} - -// Bucket_Data defines model for Bucket.Data. 
-type Bucket_Data struct { - union json.RawMessage -} - -// CGMBucketData Series of counters which represent one hour of a users data -type CGMBucketData struct { - // HighMinutes Counter of minutes spent in high glucose range - HighMinutes int `json:"highMinutes"` - - // HighRecords Counter of records in high glucose range - HighRecords int `json:"highRecords"` - - // LowMinutes Counter of minutes spent in low glucose range - LowMinutes int `json:"lowMinutes"` - - // LowRecords Counter of records in low glucose range - LowRecords int `json:"lowRecords"` - - // TargetMinutes Counter of minutes spent in target glucose range - TargetMinutes int `json:"targetMinutes"` - - // TargetRecords Counter of records in target glucose range - TargetRecords int `json:"targetRecords"` - - // TotalGlucose Total value of all glucose records - TotalGlucose float64 `json:"totalGlucose"` - - // TotalMinutes Counter of minutes using a cgm - TotalMinutes int `json:"totalMinutes"` +// DatesV5 dates tracked for summary calculation +type DatesV5 struct { + // FirstData Date of the first included value + FirstData time.Time `json:"firstData"` - // TotalRecords Counter of records using a cgm - TotalRecords int `json:"totalRecords"` + // LastData Date of the last calculated value + LastData time.Time `json:"lastData"` - // TotalVariance Total variance of all glucose records - TotalVariance float64 `json:"totalVariance"` + // LastUpdatedDate Date of the last calculation + LastUpdatedDate time.Time `json:"lastUpdatedDate"` - // VeryHighMinutes Counter of minutes spent in very high glucose range - VeryHighMinutes int `json:"veryHighMinutes"` + // LastUpdatedReason List of reasons the summary was updated for + LastUpdatedReason []string `json:"lastUpdatedReason"` - // VeryHighRecords Counter of records in very high glucose range - VeryHighRecords int `json:"veryHighRecords"` + // LastUploadDate Created date of the last calculated value + LastUploadDate time.Time `json:"lastUploadDate"` - // VeryLowMinutes Counter of minutes spent in very low glucose range - VeryLowMinutes int `json:"veryLowMinutes"` + // OutdatedReason List of reasons the summary was marked outdated for + OutdatedReason []string `json:"outdatedReason"` - // VeryLowRecords Counter of records in very low glucose range - VeryLowRecords int `json:"veryLowRecords"` + // OutdatedSince Date of the first user upload after lastData, removed when calculated + OutdatedSince *time.Time `json:"outdatedSince,omitempty"` } -// CGMPeriod Summary of a specific CGM time period (currently: 1d, 7d, 14d, 30d) -type CGMPeriod struct { +// GlucoseperiodV5 defines model for glucoseperiod.v5. 
+type GlucoseperiodV5 struct { // AverageDailyRecords Average daily readings - AverageDailyRecords *float64 `json:"averageDailyRecords,omitempty"` - - // AverageDailyRecordsDelta Difference between the averageDailyRecords in this period and version in the opposite offset - AverageDailyRecordsDelta *float64 `json:"averageDailyRecordsDelta,omitempty"` + AverageDailyRecords float64 `json:"averageDailyRecords"` // AverageGlucoseMmol Average Glucose of records in this period - AverageGlucoseMmol *float64 `json:"averageGlucoseMmol,omitempty"` - - // AverageGlucoseMmolDelta Difference between the averageGlucose in this period and the other offset version - AverageGlucoseMmolDelta *float64 `json:"averageGlucoseMmolDelta,omitempty"` - CoefficientOfVariation float64 `json:"coefficientOfVariation"` - CoefficientOfVariationDelta float64 `json:"coefficientOfVariationDelta"` - DaysWithData int `json:"daysWithData"` - DaysWithDataDelta int `json:"daysWithDataDelta"` + AverageGlucoseMmol float64 `json:"averageGlucoseMmol"` + CoefficientOfVariation float64 `json:"coefficientOfVariation"` + DaysWithData int `json:"daysWithData"` // GlucoseManagementIndicator A derived value which emulates A1C - GlucoseManagementIndicator *float64 `json:"glucoseManagementIndicator,omitempty"` - - // GlucoseManagementIndicatorDelta Difference between the glucoseManagementIndicator in this period and the other offset version - GlucoseManagementIndicatorDelta *float64 `json:"glucoseManagementIndicatorDelta,omitempty"` - HasAverageDailyRecords bool `json:"hasAverageDailyRecords"` - HasAverageGlucoseMmol bool `json:"hasAverageGlucoseMmol"` - HasGlucoseManagementIndicator bool `json:"hasGlucoseManagementIndicator"` - HasTimeCGMUseMinutes bool `json:"hasTimeCGMUseMinutes"` - HasTimeCGMUsePercent bool `json:"hasTimeCGMUsePercent"` - HasTimeCGMUseRecords bool `json:"hasTimeCGMUseRecords"` - HasTimeInAnyHighMinutes bool `json:"hasTimeInAnyHighMinutes"` - HasTimeInAnyHighPercent bool `json:"hasTimeInAnyHighPercent"` - HasTimeInAnyHighRecords bool `json:"hasTimeInAnyHighRecords"` - HasTimeInAnyLowMinutes bool `json:"hasTimeInAnyLowMinutes"` - HasTimeInAnyLowPercent bool `json:"hasTimeInAnyLowPercent"` - HasTimeInAnyLowRecords bool `json:"hasTimeInAnyLowRecords"` - HasTimeInExtremeHighMinutes bool `json:"hasTimeInExtremeHighMinutes"` - HasTimeInExtremeHighPercent bool `json:"hasTimeInExtremeHighPercent"` - HasTimeInExtremeHighRecords bool `json:"hasTimeInExtremeHighRecords"` - HasTimeInHighMinutes bool `json:"hasTimeInHighMinutes"` - HasTimeInHighPercent bool `json:"hasTimeInHighPercent"` - HasTimeInHighRecords bool `json:"hasTimeInHighRecords"` - HasTimeInLowMinutes bool `json:"hasTimeInLowMinutes"` - HasTimeInLowPercent bool `json:"hasTimeInLowPercent"` - HasTimeInLowRecords bool `json:"hasTimeInLowRecords"` - HasTimeInTargetMinutes bool `json:"hasTimeInTargetMinutes"` - HasTimeInTargetPercent bool `json:"hasTimeInTargetPercent"` - HasTimeInTargetRecords bool `json:"hasTimeInTargetRecords"` - HasTimeInVeryHighMinutes bool `json:"hasTimeInVeryHighMinutes"` - HasTimeInVeryHighPercent bool `json:"hasTimeInVeryHighPercent"` - HasTimeInVeryHighRecords bool `json:"hasTimeInVeryHighRecords"` - HasTimeInVeryLowMinutes bool `json:"hasTimeInVeryLowMinutes"` - HasTimeInVeryLowPercent bool `json:"hasTimeInVeryLowPercent"` - HasTimeInVeryLowRecords bool `json:"hasTimeInVeryLowRecords"` - HasTotalRecords bool `json:"hasTotalRecords"` - HoursWithData int `json:"hoursWithData"` - HoursWithDataDelta int `json:"hoursWithDataDelta"` - StandardDeviation 
float64 `json:"standardDeviation"` - StandardDeviationDelta float64 `json:"standardDeviationDelta"` - - // TimeCGMUseMinutes Counter of minutes spent wearing a cgm - TimeCGMUseMinutes *int `json:"timeCGMUseMinutes,omitempty"` - - // TimeCGMUseMinutesDelta Difference between the timeCGMUseMinutes in this period and version in the opposite offset - TimeCGMUseMinutesDelta *int `json:"timeCGMUseMinutesDelta,omitempty"` - - // TimeCGMUsePercent Percentage of time spent wearing a cgm - TimeCGMUsePercent *float64 `json:"timeCGMUsePercent,omitempty"` - - // TimeCGMUsePercentDelta Difference between the timeCGMUsePercent in this period and version in the opposite offset - TimeCGMUsePercentDelta *float64 `json:"timeCGMUsePercentDelta,omitempty"` - - // TimeCGMUseRecords Counter of minutes spent wearing a cgm - TimeCGMUseRecords *int `json:"timeCGMUseRecords,omitempty"` - - // TimeCGMUseRecordsDelta Difference between the timeCGMUseRecords in this period and version in the opposite offset - TimeCGMUseRecordsDelta *int `json:"timeCGMUseRecordsDelta,omitempty"` - - // TimeInAnyHighMinutes Counter of minutes spent in Any high glucose range - TimeInAnyHighMinutes *int `json:"timeInAnyHighMinutes,omitempty"` - - // TimeInAnyHighMinutesDelta Difference between the timeInAnyHighMinutes in this period and version in the opposite offset - TimeInAnyHighMinutesDelta *int `json:"timeInAnyHighMinutesDelta,omitempty"` - - // TimeInAnyHighPercent Percentage of time spent in Any high glucose range - TimeInAnyHighPercent *float64 `json:"timeInAnyHighPercent,omitempty"` - - // TimeInAnyHighPercentDelta Difference between the timeInAnyHighPercent in this period and version in the opposite offset - TimeInAnyHighPercentDelta *float64 `json:"timeInAnyHighPercentDelta,omitempty"` - - // TimeInAnyHighRecords Counter of records in Any high glucose range - TimeInAnyHighRecords *int `json:"timeInAnyHighRecords,omitempty"` - - // TimeInAnyHighRecordsDelta Difference between the timeInAnyHighRecords in this period and version in the opposite offset - TimeInAnyHighRecordsDelta *int `json:"timeInAnyHighRecordsDelta,omitempty"` - - // TimeInAnyLowMinutes Counter of minutes spent in Any low glucose range - TimeInAnyLowMinutes *int `json:"timeInAnyLowMinutes,omitempty"` - - // TimeInAnyLowMinutesDelta Difference between the timeInAnyLowMinutes in this period and version in the opposite offset - TimeInAnyLowMinutesDelta *int `json:"timeInAnyLowMinutesDelta,omitempty"` - - // TimeInAnyLowPercent Percentage of time spent in Any low glucose range - TimeInAnyLowPercent *float64 `json:"timeInAnyLowPercent,omitempty"` - - // TimeInAnyLowPercentDelta Difference between the timeInAnyLowPercent in this period and version in the opposite offset - TimeInAnyLowPercentDelta *float64 `json:"timeInAnyLowPercentDelta,omitempty"` - - // TimeInAnyLowRecords Counter of records in Any low glucose range - TimeInAnyLowRecords *int `json:"timeInAnyLowRecords,omitempty"` - - // TimeInAnyLowRecordsDelta Difference between the timeInAnyLowRecords in this period and version in the opposite offset - TimeInAnyLowRecordsDelta *int `json:"timeInAnyLowRecordsDelta,omitempty"` - - // TimeInExtremeHighMinutes Counter of minutes spent in extreme high glucose range - TimeInExtremeHighMinutes *int `json:"timeInExtremeHighMinutes,omitempty"` - - // TimeInExtremeHighMinutesDelta Difference between the timeInExtremeHighMinutes in this period and version in the opposite offset - TimeInExtremeHighMinutesDelta *int `json:"timeInExtremeHighMinutesDelta,omitempty"` - - // 
TimeInExtremeHighPercent Percentage of time spent in extreme high glucose range - TimeInExtremeHighPercent *float64 `json:"timeInExtremeHighPercent,omitempty"` - - // TimeInExtremeHighPercentDelta Difference between the timeInExtremeHighPercent in this period and version in the opposite offset - TimeInExtremeHighPercentDelta *float64 `json:"timeInExtremeHighPercentDelta,omitempty"` - - // TimeInExtremeHighRecords Counter of records in extreme high glucose range - TimeInExtremeHighRecords *int `json:"timeInExtremeHighRecords,omitempty"` - - // TimeInExtremeHighRecordsDelta Difference between the timeInExtremeHighRecords in this period and version in the opposite offset - TimeInExtremeHighRecordsDelta *int `json:"timeInExtremeHighRecordsDelta,omitempty"` - - // TimeInHighMinutes Counter of minutes spent in high glucose range - TimeInHighMinutes *int `json:"timeInHighMinutes,omitempty"` - - // TimeInHighMinutesDelta Difference between the timeInHighMinutes in this period and version in the opposite offset - TimeInHighMinutesDelta *int `json:"timeInHighMinutesDelta,omitempty"` - - // TimeInHighPercent Percentage of time spent in high glucose range - TimeInHighPercent *float64 `json:"timeInHighPercent,omitempty"` - - // TimeInHighPercentDelta Difference between the timeInHighPercent in this period and version in the opposite offset - TimeInHighPercentDelta *float64 `json:"timeInHighPercentDelta,omitempty"` - - // TimeInHighRecords Counter of records in high glucose range - TimeInHighRecords *int `json:"timeInHighRecords,omitempty"` - - // TimeInHighRecordsDelta Difference between the timeInHighRecords in this period and version in the opposite offset - TimeInHighRecordsDelta *int `json:"timeInHighRecordsDelta,omitempty"` - - // TimeInLowMinutes Counter of minutes spent in low glucose range - TimeInLowMinutes *int `json:"timeInLowMinutes,omitempty"` - - // TimeInLowMinutesDelta Difference between the timeInLowMinutes in this period and version in the opposite offset - TimeInLowMinutesDelta *int `json:"timeInLowMinutesDelta,omitempty"` - - // TimeInLowPercent Percentage of time spent in low glucose range - TimeInLowPercent *float64 `json:"timeInLowPercent,omitempty"` - - // TimeInLowPercentDelta Difference between the timeInLowPercent in this period and version in the opposite offset - TimeInLowPercentDelta *float64 `json:"timeInLowPercentDelta,omitempty"` - - // TimeInLowRecords Counter of records in low glucose range - TimeInLowRecords *int `json:"timeInLowRecords,omitempty"` - - // TimeInLowRecordsDelta Difference between the timeInLowRecords in this period and version in the opposite offset - TimeInLowRecordsDelta *int `json:"timeInLowRecordsDelta,omitempty"` - - // TimeInTargetMinutes Counter of minutes spent in target glucose range - TimeInTargetMinutes *int `json:"timeInTargetMinutes,omitempty"` - - // TimeInTargetMinutesDelta Difference between the timeInTargetMinutes in this period and version in the opposite offset - TimeInTargetMinutesDelta *int `json:"timeInTargetMinutesDelta,omitempty"` - - // TimeInTargetPercent Percentage of time spent in target glucose range - TimeInTargetPercent *float64 `json:"timeInTargetPercent,omitempty"` - - // TimeInTargetPercentDelta Difference between the timeInTargetPercent in this period and version in the opposite offset - TimeInTargetPercentDelta *float64 `json:"timeInTargetPercentDelta,omitempty"` + GlucoseManagementIndicator float64 `json:"glucoseManagementIndicator"` + HoursWithData int `json:"hoursWithData"` - // TimeInTargetRecords Counter of 
records in target glucose range - TimeInTargetRecords *int `json:"timeInTargetRecords,omitempty"` + // InAnyHigh Metrics for a particular range of glucose values + InAnyHigh GlucoserangeV5 `json:"inAnyHigh,omitempty"` - // TimeInTargetRecordsDelta Difference between the timeInTargetRecords in this period and version in the opposite offset - TimeInTargetRecordsDelta *int `json:"timeInTargetRecordsDelta,omitempty"` + // InAnyLow Metrics for a particular range of glucose values + InAnyLow GlucoserangeV5 `json:"inAnyLow,omitempty"` - // TimeInVeryHighMinutes Counter of minutes spent in very high glucose range - TimeInVeryHighMinutes *int `json:"timeInVeryHighMinutes,omitempty"` + // InExtremeHigh Metrics for a particular range of glucose values + InExtremeHigh GlucoserangeV5 `json:"inExtremeHigh,omitempty"` - // TimeInVeryHighMinutesDelta Difference between the timeInVeryHighMinutes in this period and version in the opposite offset - TimeInVeryHighMinutesDelta *int `json:"timeInVeryHighMinutesDelta,omitempty"` + // InHigh Metrics for a particular range of glucose values + InHigh GlucoserangeV5 `json:"inHigh,omitempty"` - // TimeInVeryHighPercent Percentage of time spent in very high glucose range - TimeInVeryHighPercent *float64 `json:"timeInVeryHighPercent,omitempty"` + // InLow Metrics for a particular range of glucose values + InLow GlucoserangeV5 `json:"inLow,omitempty"` - // TimeInVeryHighPercentDelta Difference between the timeInVeryHighPercent in this period and version in the opposite offset - TimeInVeryHighPercentDelta *float64 `json:"timeInVeryHighPercentDelta,omitempty"` + // InTarget Metrics for a particular range of glucose values + InTarget GlucoserangeV5 `json:"inTarget,omitempty"` - // TimeInVeryHighRecords Counter of records in very high glucose range - TimeInVeryHighRecords *int `json:"timeInVeryHighRecords,omitempty"` + // InVeryHigh Metrics for a particular range of glucose values + InVeryHigh GlucoserangeV5 `json:"inVeryHigh,omitempty"` - // TimeInVeryHighRecordsDelta Difference between the timeInVeryHighRecords in this period and version in the opposite offset - TimeInVeryHighRecordsDelta *int `json:"timeInVeryHighRecordsDelta,omitempty"` + // InVeryLow Metrics for a particular range of glucose values + InVeryLow GlucoserangeV5 `json:"inVeryLow,omitempty"` + StandardDeviation float64 `json:"standardDeviation"` - // TimeInVeryLowMinutes Counter of minutes spent in very low glucose range - TimeInVeryLowMinutes *int `json:"timeInVeryLowMinutes,omitempty"` - - // TimeInVeryLowMinutesDelta Difference between the timeInVeryLowMinutes in this period and version in the opposite offset - TimeInVeryLowMinutesDelta *int `json:"timeInVeryLowMinutesDelta,omitempty"` - - // TimeInVeryLowPercent Percentage of time spent in very low glucose range - TimeInVeryLowPercent *float64 `json:"timeInVeryLowPercent,omitempty"` - - // TimeInVeryLowPercentDelta Difference between the timeInVeryLowPercent in this period and version in the opposite offset - TimeInVeryLowPercentDelta *float64 `json:"timeInVeryLowPercentDelta,omitempty"` - - // TimeInVeryLowRecords Counter of records in very low glucose range - TimeInVeryLowRecords *int `json:"timeInVeryLowRecords,omitempty"` - - // TimeInVeryLowRecordsDelta Difference between the timeInVeryLowRecords in this period and version in the opposite offset - TimeInVeryLowRecordsDelta *int `json:"timeInVeryLowRecordsDelta,omitempty"` - - // TotalRecords Counter of records - TotalRecords *int `json:"totalRecords,omitempty"` - - // TotalRecordsDelta Difference 
between the totalRecords in this period and version in the opposite offset - TotalRecordsDelta *int `json:"totalRecordsDelta,omitempty"` + // Total Metrics for a particular range of glucose values + Total GlucoserangeV5 `json:"total,omitempty"` } -// CGMPeriods A map to each supported CGM summary period -type CGMPeriods map[string]CGMPeriod +// GlucoserangeV5 Metrics for a particular range of glucose values +type GlucoserangeV5 struct { + // Glucose total of glucose values + Glucose float64 `json:"glucose"` -// CGMStats A summary of a users recent CGM glucose values -type CGMStats struct { - // Buckets Rotating list containing the stats for each currently tracked hour in order - Buckets []Bucket `json:"buckets,omitempty"` + // Minutes total of minutes represented + Minutes int `json:"minutes"` - // OffsetPeriods A map to each supported CGM summary period - OffsetPeriods CGMPeriods `json:"offsetPeriods,omitempty"` + // Percent percent of all ranges this range represents, or total CGM use in a Total range + Percent float64 `json:"percent"` - // Periods A map to each supported CGM summary period - Periods CGMPeriods `json:"periods,omitempty"` + // Records total records + Records int `json:"records"` - // TotalHours Total hours represented in the hourly stats - TotalHours int `json:"totalHours"` + // Variance total variance of the values in this range + Variance float64 `json:"variance"` } -// Config Summary schema version and calculation configuration -type Config struct { - // HighGlucoseThreshold Threshold used for determining if a value is high - HighGlucoseThreshold float64 `json:"highGlucoseThreshold"` +// GlucoserangesV5 glucose ranges +type GlucoserangesV5 struct { + // InAnyHigh Metrics for a particular range of glucose values + InAnyHigh GlucoserangeV5 `json:"inAnyHigh,omitempty"` - // LowGlucoseThreshold Threshold used for determining if a value is low - LowGlucoseThreshold float64 `json:"lowGlucoseThreshold"` + // InAnyLow Metrics for a particular range of glucose values + InAnyLow GlucoserangeV5 `json:"inAnyLow,omitempty"` - // SchemaVersion Summary schema version - SchemaVersion int `json:"schemaVersion"` + // InExtremeHigh Metrics for a particular range of glucose values + InExtremeHigh GlucoserangeV5 `json:"inExtremeHigh,omitempty"` - // VeryHighGlucoseThreshold Threshold used for determining if a value is very high - VeryHighGlucoseThreshold float64 `json:"veryHighGlucoseThreshold"` + // InHigh Metrics for a particular range of glucose values + InHigh GlucoserangeV5 `json:"inHigh,omitempty"` - // VeryLowGlucoseThreshold Threshold used for determining if a value is very low - VeryLowGlucoseThreshold float64 `json:"veryLowGlucoseThreshold"` -} + // InLow Metrics for a particular range of glucose values + InLow GlucoserangeV5 `json:"inLow,omitempty"` -// ContinuousBucketData Series of counters which represent one hour of a users data -type ContinuousBucketData struct { - // DeferredRecords Counter of records uploaded later than 24 hours of their timestamp - DeferredRecords int `json:"deferredRecords"` + // InTarget Metrics for a particular range of glucose values + InTarget GlucoserangeV5 `json:"inTarget,omitempty"` - // RealtimeRecords Counter of records uploaded within 24 hours of their timestamp - RealtimeRecords int `json:"realtimeRecords"` + // InVeryHigh Metrics for a particular range of glucose values + InVeryHigh GlucoserangeV5 `json:"inVeryHigh,omitempty"` - // TotalRecords Counter of records from continuous uploads - TotalRecords int `json:"totalRecords"` -} + // 
InVeryLow Metrics for a particular range of glucose values + InVeryLow GlucoserangeV5 `json:"inVeryLow,omitempty"` -// ContinuousPeriod Summary of a specific continuous upload time period (currently: 1d, 7d, 14d, 30d) -type ContinuousPeriod struct { - AverageDailyRecords float64 `json:"averageDailyRecords"` - DeferredPercent float64 `json:"deferredPercent"` - DeferredRecords int `json:"deferredRecords"` - RealtimePercent float64 `json:"realtimePercent"` - RealtimeRecords int `json:"realtimeRecords"` - TotalRecords int `json:"totalRecords"` + // Total Metrics for a particular range of glucose values + Total GlucoserangeV5 `json:"total,omitempty"` } -// ContinuousPeriods A map to each supported CGM summary period -type ContinuousPeriods map[string]ContinuousPeriod - -// ContinuousStats A summary of a users recent CGM glucose values -type ContinuousStats struct { - // Buckets Rotating list containing the stats for each currently tracked hour in order - Buckets []Bucket `json:"buckets,omitempty"` +// SummaryV5 defines model for summary.v5. +type SummaryV5 struct { + // Config Summary schema version and calculation configuration + Config ConfigV1 `json:"config,omitempty"` - // OffsetPeriods A map to each supported CGM summary period - OffsetPeriods ContinuousPeriods `json:"offsetPeriods,omitempty"` + // Dates dates tracked for summary calculation + Dates DatesV5 `json:"dates,omitempty"` + Periods *SummaryV5_Periods `json:"periods,omitempty"` - // Periods A map to each supported CGM summary period - Periods ContinuousPeriods `json:"periods,omitempty"` + // Type Field which contains a summary type string. + Type SummarytypeV5 `json:"type,omitempty"` - // TotalHours Total hours represented in the hourly stats - TotalHours int `json:"totalHours"` + // UserId String representation of a Tidepool User ID. Old style IDs are 10-digit strings consisting of only hexadeximcal digits. New style IDs are 36-digit [UUID v4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) + UserId *Tidepooluserid `json:"userId,omitempty"` } -// Dates dates tracked for summary calculation -type Dates struct { - // FirstData Date of the first included value - FirstData *time.Time `json:"firstData,omitempty"` - HasFirstData bool `json:"hasFirstData"` - HasLastData bool `json:"hasLastData"` - HasLastUploadDate bool `json:"hasLastUploadDate"` - HasOutdatedSince bool `json:"hasOutdatedSince"` - - // LastData Date of the last calculated value - LastData *time.Time `json:"lastData,omitempty"` - - // LastUpdatedDate Date of the last calculation - LastUpdatedDate *time.Time `json:"lastUpdatedDate,omitempty"` - - // LastUpdatedReason List of reasons the summary was updated for - LastUpdatedReason *[]string `json:"lastUpdatedReason,omitempty"` - - // LastUploadDate Created date of the last calculated value - LastUploadDate *time.Time `json:"lastUploadDate,omitempty"` - - // OutdatedReason List of reasons the summary was marked outdated for - OutdatedReason *[]string `json:"outdatedReason,omitempty"` - - // OutdatedSince Date of the first user upload after lastData, removed when calculated - OutdatedSince *time.Time `json:"outdatedSince,omitempty"` - - // OutdatedSinceLimit Upper limit of the OutdatedSince value to prevent infinite queue duration - OutdatedSinceLimit *time.Time `json:"outdatedSinceLimit,omitempty"` +// SummaryV5_Periods defines model for SummaryV5.Periods. 
+type SummaryV5_Periods struct { + union json.RawMessage } -// Summary A summary of a users recent data -type Summary struct { +// SummarybaseV5 shared properties of all summaries +type SummarybaseV5 struct { // Config Summary schema version and calculation configuration - Config Config `json:"config,omitempty"` + Config ConfigV1 `json:"config,omitempty"` // Dates dates tracked for summary calculation - Dates Dates `json:"dates,omitempty"` - Stats *Summary_Stats `json:"stats,omitempty"` + Dates DatesV5 `json:"dates,omitempty"` // Type Field which contains a summary type string. - Type SummaryTypeSchema `json:"type,omitempty"` - UpdateWithoutChangeCount int `json:"updateWithoutChangeCount"` + Type SummarytypeV5 `json:"type,omitempty"` // UserId String representation of a Tidepool User ID. Old style IDs are 10-digit strings consisting of only hexadeximcal digits. New style IDs are 36-digit [UUID v4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) - UserId *TidepoolUserId `json:"userId,omitempty"` + UserId *Tidepooluserid `json:"userId,omitempty"` } -// Summary_Stats defines model for Summary.Stats. -type Summary_Stats struct { - union json.RawMessage -} - -// SummaryTypeSchema Field which contains a summary type string. -type SummaryTypeSchema = string +// SummarytypeV5 Field which contains a summary type string. +type SummarytypeV5 = string -// TidepoolUserId String representation of a Tidepool User ID. Old style IDs are 10-digit strings consisting of only hexadeximcal digits. New style IDs are 36-digit [UUID v4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) -type TidepoolUserId = string +// Tidepooluserid String representation of a Tidepool User ID. Old style IDs are 10-digit strings consisting of only hexadeximcal digits. New style IDs are 36-digit [UUID v4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) +type Tidepooluserid = string // SummaryType Field which contains a summary type string. -type SummaryType = SummaryTypeSchema +type SummaryType = SummarytypeV5 // UserId String representation of a Tidepool User ID. Old style IDs are 10-digit strings consisting of only hexadeximcal digits. 
New style IDs are 36-digit [UUID v4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) -type UserId = TidepoolUserId - -// AsBGMBucketData returns the union data inside the Bucket_Data as a BGMBucketData -func (t Bucket_Data) AsBGMBucketData() (BGMBucketData, error) { - var body BGMBucketData - err := json.Unmarshal(t.union, &body) - return body, err -} - -// FromBGMBucketData overwrites any union data inside the Bucket_Data as the provided BGMBucketData -func (t *Bucket_Data) FromBGMBucketData(v BGMBucketData) error { - b, err := json.Marshal(v) - t.union = b - return err -} - -// MergeBGMBucketData performs a merge with any union data inside the Bucket_Data, using the provided BGMBucketData -func (t *Bucket_Data) MergeBGMBucketData(v BGMBucketData) error { - b, err := json.Marshal(v) - if err != nil { - return err - } - - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err -} - -// AsCGMBucketData returns the union data inside the Bucket_Data as a CGMBucketData -func (t Bucket_Data) AsCGMBucketData() (CGMBucketData, error) { - var body CGMBucketData - err := json.Unmarshal(t.union, &body) - return body, err -} - -// FromCGMBucketData overwrites any union data inside the Bucket_Data as the provided CGMBucketData -func (t *Bucket_Data) FromCGMBucketData(v CGMBucketData) error { - b, err := json.Marshal(v) - t.union = b - return err -} - -// MergeCGMBucketData performs a merge with any union data inside the Bucket_Data, using the provided CGMBucketData -func (t *Bucket_Data) MergeCGMBucketData(v CGMBucketData) error { - b, err := json.Marshal(v) - if err != nil { - return err - } - - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err -} - -// AsContinuousBucketData returns the union data inside the Bucket_Data as a ContinuousBucketData -func (t Bucket_Data) AsContinuousBucketData() (ContinuousBucketData, error) { - var body ContinuousBucketData - err := json.Unmarshal(t.union, &body) - return body, err -} - -// FromContinuousBucketData overwrites any union data inside the Bucket_Data as the provided ContinuousBucketData -func (t *Bucket_Data) FromContinuousBucketData(v ContinuousBucketData) error { - b, err := json.Marshal(v) - t.union = b - return err -} - -// MergeContinuousBucketData performs a merge with any union data inside the Bucket_Data, using the provided ContinuousBucketData -func (t *Bucket_Data) MergeContinuousBucketData(v ContinuousBucketData) error { - b, err := json.Marshal(v) - if err != nil { - return err - } - - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err -} - -func (t Bucket_Data) MarshalJSON() ([]byte, error) { - b, err := t.union.MarshalJSON() - return b, err -} - -func (t *Bucket_Data) UnmarshalJSON(b []byte) error { - err := t.union.UnmarshalJSON(b) - return err -} +type UserId = Tidepooluserid -// AsCGMStats returns the union data inside the Summary_Stats as a CGMStats -func (t Summary_Stats) AsCGMStats() (CGMStats, error) { - var body CGMStats +// AsCgmperiodsV5 returns the union data inside the SummaryV5_Periods as a CgmperiodsV5 +func (t SummaryV5_Periods) AsCgmperiodsV5() (CgmperiodsV5, error) { + var body CgmperiodsV5 err := json.Unmarshal(t.union, &body) return body, err } -// FromCGMStats overwrites any union data inside the Summary_Stats as the provided CGMStats -func (t *Summary_Stats) FromCGMStats(v CGMStats) error { +// FromCgmperiodsV5 overwrites any union data inside the SummaryV5_Periods as the provided CgmperiodsV5 +func (t 
*SummaryV5_Periods) FromCgmperiodsV5(v CgmperiodsV5) error { b, err := json.Marshal(v) t.union = b return err } -// MergeCGMStats performs a merge with any union data inside the Summary_Stats, using the provided CGMStats -func (t *Summary_Stats) MergeCGMStats(v CGMStats) error { +// MergeCgmperiodsV5 performs a merge with any union data inside the SummaryV5_Periods, using the provided CgmperiodsV5 +func (t *SummaryV5_Periods) MergeCgmperiodsV5(v CgmperiodsV5) error { b, err := json.Marshal(v) if err != nil { return err @@ -731,22 +257,22 @@ func (t *Summary_Stats) MergeCGMStats(v CGMStats) error { return err } -// AsBGMStats returns the union data inside the Summary_Stats as a BGMStats -func (t Summary_Stats) AsBGMStats() (BGMStats, error) { - var body BGMStats +// AsBgmperiodsV5 returns the union data inside the SummaryV5_Periods as a BgmperiodsV5 +func (t SummaryV5_Periods) AsBgmperiodsV5() (BgmperiodsV5, error) { + var body BgmperiodsV5 err := json.Unmarshal(t.union, &body) return body, err } -// FromBGMStats overwrites any union data inside the Summary_Stats as the provided BGMStats -func (t *Summary_Stats) FromBGMStats(v BGMStats) error { +// FromBgmperiodsV5 overwrites any union data inside the SummaryV5_Periods as the provided BgmperiodsV5 +func (t *SummaryV5_Periods) FromBgmperiodsV5(v BgmperiodsV5) error { b, err := json.Marshal(v) t.union = b return err } -// MergeBGMStats performs a merge with any union data inside the Summary_Stats, using the provided BGMStats -func (t *Summary_Stats) MergeBGMStats(v BGMStats) error { +// MergeBgmperiodsV5 performs a merge with any union data inside the SummaryV5_Periods, using the provided BgmperiodsV5 +func (t *SummaryV5_Periods) MergeBgmperiodsV5(v BgmperiodsV5) error { b, err := json.Marshal(v) if err != nil { return err @@ -757,22 +283,22 @@ func (t *Summary_Stats) MergeBGMStats(v BGMStats) error { return err } -// AsContinuousStats returns the union data inside the Summary_Stats as a ContinuousStats -func (t Summary_Stats) AsContinuousStats() (ContinuousStats, error) { - var body ContinuousStats +// AsContinuousperiodsV5 returns the union data inside the SummaryV5_Periods as a ContinuousperiodsV5 +func (t SummaryV5_Periods) AsContinuousperiodsV5() (ContinuousperiodsV5, error) { + var body ContinuousperiodsV5 err := json.Unmarshal(t.union, &body) return body, err } -// FromContinuousStats overwrites any union data inside the Summary_Stats as the provided ContinuousStats -func (t *Summary_Stats) FromContinuousStats(v ContinuousStats) error { +// FromContinuousperiodsV5 overwrites any union data inside the SummaryV5_Periods as the provided ContinuousperiodsV5 +func (t *SummaryV5_Periods) FromContinuousperiodsV5(v ContinuousperiodsV5) error { b, err := json.Marshal(v) t.union = b return err } -// MergeContinuousStats performs a merge with any union data inside the Summary_Stats, using the provided ContinuousStats -func (t *Summary_Stats) MergeContinuousStats(v ContinuousStats) error { +// MergeContinuousperiodsV5 performs a merge with any union data inside the SummaryV5_Periods, using the provided ContinuousperiodsV5 +func (t *SummaryV5_Periods) MergeContinuousperiodsV5(v ContinuousperiodsV5) error { b, err := json.Marshal(v) if err != nil { return err @@ -783,12 +309,12 @@ func (t *Summary_Stats) MergeContinuousStats(v ContinuousStats) error { return err } -func (t Summary_Stats) MarshalJSON() ([]byte, error) { +func (t SummaryV5_Periods) MarshalJSON() ([]byte, error) { b, err := t.union.MarshalJSON() return b, err } -func (t *Summary_Stats) 
UnmarshalJSON(b []byte) error { +func (t *SummaryV5_Periods) UnmarshalJSON(b []byte) error { err := t.union.UnmarshalJSON(b) return err } diff --git a/go.mod b/go.mod index aecedd9..9cc825b 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module github.com/tidepool-org/go-common -go 1.22 +go 1.24.0 require ( - github.com/IBM/sarama v1.43.2 + github.com/IBM/sarama v1.45.0 github.com/avast/retry-go v3.0.0+incompatible github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.15.2 github.com/cloudevents/sdk-go/v2 v2.15.2 @@ -16,11 +16,11 @@ require ( require ( github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect github.com/davecgh/go-spew v1.1.1 // indirect - github.com/eapache/go-resiliency v1.6.0 // indirect + github.com/eapache/go-resiliency v1.7.0 // indirect github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 // indirect github.com/eapache/queue v1.1.0 // indirect github.com/golang/snappy v0.0.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect @@ -31,17 +31,17 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.4 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/klauspost/compress v1.17.9 // indirect + github.com/klauspost/compress v1.18.0 // indirect github.com/kr/pretty v0.3.1 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/pierrec/lz4/v4 v4.1.21 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect - github.com/rogpeppe/go-internal v1.12.0 // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect github.com/xdg/stringprep v1.0.3 // indirect go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.27.0 // indirect - golang.org/x/crypto v0.32.0 // indirect - golang.org/x/net v0.34.0 // indirect - golang.org/x/text v0.21.0 // indirect + golang.org/x/crypto v0.35.0 // indirect + golang.org/x/net v0.35.0 // indirect + golang.org/x/text v0.22.0 // indirect ) diff --git a/go.sum b/go.sum index c5ab4c5..44cceac 100644 --- a/go.sum +++ b/go.sum @@ -1,21 +1,11 @@ -github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= -github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= -github.com/CloudyKit/jet/v6 v6.2.0/go.mod h1:d3ypHeIRNo2+XyqnGA8s+aphtcVpjP5hPwP/Lzo7Ro4= -github.com/IBM/sarama v1.43.2 h1:HABeEqRUh32z8yzY2hGB/j8mHSzC/HA9zlEjqFNCzSw= -github.com/IBM/sarama v1.43.2/go.mod h1:Kyo4WkF24Z+1nz7xeVUFWIuKVV8RS3wM8mkvPKMdXFQ= -github.com/Joker/jade v1.1.3/go.mod h1:T+2WLyt7VH6Lp0TRxQrUYEs64nRc83wkMQrfeIQKduM= +github.com/IBM/sarama v1.45.0 h1:IzeBevTn809IJ/dhNKhP5mpxEXTmELuezO2tgHD9G5E= +github.com/IBM/sarama v1.45.0/go.mod h1:EEay63m8EZkeumco9TDXf2JT3uDnZsZqFgV46n4yZdY= github.com/RaveNoX/go-jsoncommentstrip v1.0.0/go.mod h1:78ihd09MekBnJnxpICcwzCMzGrKSKYe4AqU6PDYYpjk= -github.com/Shopify/goreferrer v0.0.0-20220729165902-8cddb4f5de06/go.mod h1:7erjKLwalezA0k99cWs5L11HWOAPNjdUZ6RxH1BXbbM= -github.com/andybalholm/brotli v1.0.5/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ= github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod 
h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk= github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0= github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY= -github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= github.com/bmatcuk/doublestar v1.1.1/go.mod h1:UD6OnuiIn0yFxxA2le/rnRU1G4RaI4UvFv1sNto9p6w= -github.com/bytedance/sonic v1.10.0-rc3/go.mod h1:iZcSUejdk5aukTND/Eu/ivjQuEL0Cu9/rf50Hi0u/g4= -github.com/chenzhuoyu/base64x v0.0.0-20230717121745-296ad89f973d/go.mod h1:8EPpVsBuRksnlj1mLy4AWzRNQYxauNi62uWcE3to6eA= -github.com/chenzhuoyu/iasm v0.9.0/go.mod h1:Xjy2NpN3h7aUqeqM+woSuuvxmIe6+DDsiNLIrkAmYog= github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.15.2 h1:dl2xbFLV2FGd3OBNC6ncSN9l+gPNEP0DYE+1yKVV5DQ= github.com/cloudevents/sdk-go/protocol/kafka_sarama/v2 v2.15.2/go.mod h1:jXfl9I1Q78+4zdYGTjHNQcrbNtJL63jpzSgVE2rE79U= github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc= @@ -24,34 +14,23 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.6.0 h1:CqGDTLtpwuWKn6Nj3uNUdflaq+/kIPsg0gfNzHton30= -github.com/eapache/go-resiliency v1.6.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= +github.com/eapache/go-resiliency v1.7.0 h1:n3NRTnBn5N0Cbi/IeOHuQn9s2UwVUH7Ga0ZWcP+9JTA= +github.com/eapache/go-resiliency v1.7.0/go.mod h1:5yPzW0MIvSe0JDsv0v+DvcjEv2FyD6iZYSs1ZI+iQho= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 h1:Oy0F4ALJ04o5Qqpdz8XLIpNA3WM/iSIXqxtqo7UGVws= github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3/go.mod h1:YvSRo5mw33fLEx1+DlK6L2VV43tJt5Eyel9n9XBcR+0= github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= -github.com/flosch/pongo2/v4 v4.0.2/go.mod h1:B5ObFANs/36VwxxlgKpdchIJHMvHB562PW+BWPhwZD8= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= -github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= -github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= -github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy 
v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomarkdown/markdown v0.0.0-20230922112808-5421fefb8386/go.mod h1:JDGcbDT52eL4fju3sZ4TeHGsQwhG9nbDV21aMyhwPoA= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -62,7 +41,6 @@ github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/iris-contrib/schema v0.0.6/go.mod h1:iYszG0IOsuIsfzjymw1kMzTL8YQcCWlm65f3wX8J5iA= github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8= github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo= @@ -75,56 +53,34 @@ github.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh6 github.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs= github.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY= github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= -github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/juju/gnuflag v0.0.0-20171113085948-2ce1bb71843d/go.mod h1:2PavIy+JPciBPrBUjwbNvtwB6RQlve+hkpll6QSNmOE= -github.com/kataras/blocks v0.0.7/go.mod h1:UJIU97CluDo0f+zEjbnbkeMRlvYORtmc1304EeyXf4I= -github.com/kataras/golog v0.1.9/go.mod h1:jlpk/bOaYCyqDqH18pgDHdaJab72yBE6i0O3s30hpWY= -github.com/kataras/iris/v12 v12.2.6-0.20230908161203-24ba4e8933b9/go.mod h1:ldkoR3iXABBeqlTibQ3MYaviA1oSlPvim6f55biwBh4= -github.com/kataras/pio v0.0.12/go.mod h1:ODK/8XBhhQ5WqrAhKy+9lTPS7sBf6O3KcLhc9klfRcY= -github.com/kataras/sitemap v0.0.6/go.mod h1:dW4dOCNs896OR1HmG+dMLdT7JjDk7mYBzoIRwuj5jA4= -github.com/kataras/tunnel v0.0.4/go.mod h1:9FkU4LaeifdMWqZu7o20ojmW4B7hdhv2CMLwfnHGpYw= github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8= github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg= -github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= -github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= 
-github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/labstack/echo/v4 v4.11.4/go.mod h1:noh7EvLwqDsmh/X/HWKPUl1AjzJrhyptRyEbQJfxen8= -github.com/labstack/gommon v0.4.2/go.mod h1:QlUFxVM+SNXhDL/Z7YhocGIBYOiwB0mXm1+1bAPHPyU= -github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= -github.com/mailgun/raymond/v2 v2.0.48/go.mod h1:lsgvL50kgt1ylcFJYZiULi5fjPBkkhNfj4KA0W54Z18= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/microcosm-cc/bluemonday v1.0.25/go.mod h1:ZIOjCQp1OrzBBPIJmfX4qDYFuhU02nx4bn030ixfHLE= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/oapi-codegen/runtime v1.1.1 h1:EXLHh0DXIJnWhdRPN2w4MXAzFyE4CskzhNLUmtpMYro= github.com/oapi-codegen/runtime v1.1.1/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg= -github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= -github.com/pierrec/lz4/v4 v4.1.21 h1:yOVMLb6qSIDP67pl/5F7RepeKYu/VmTyEXvuMI5d9mQ= -github.com/pierrec/lz4/v4 v4.1.21/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 h1:N/ElC8H3+5XpJzTSTfLsJV/mx9Q9g7kxmchpfZyxgzM= github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= -github.com/russross/blackfriday/v2 v2.1.0/go.mod 
h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= -github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= github.com/spkg/bom v0.0.0-20160624110644-59b7046e48ad/go.mod h1:qLr4V1qq6nMqFKkMo8ZTx3f+BZEkzsRUY10Xsm2mwU0= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -134,86 +90,63 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tdewolff/minify/v2 v2.12.9/go.mod h1:qOqdlDfL+7v0/fyymB+OP497nIxJYSvX4MQWA8OoiXU= -github.com/tdewolff/parse/v2 v2.6.8/go.mod h1:XHDhaU6IBgsryfdnpzUXBlT6leW/l25yrFBTEb4eIyM= -github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= -github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= -github.com/valyala/fasttemplate v1.2.2/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ= -github.com/vmihailenco/msgpack/v5 v5.3.5/go.mod h1:7xyJ9e+0+9SaZT0Wt1RGleJXzli6Q/V5KbhBonMG9jc= -github.com/vmihailenco/tagparser/v2 v2.0.0/go.mod h1:Wri+At7QHww0WTrCBeu4J6bNtoV6mEfg5OIWRZA9qds= github.com/xdg/scram v1.0.5 h1:TuS0RFmt5Is5qm9Tm2SoD89OPqe4IRiFtyFY4iwWXsw= github.com/xdg/scram v1.0.5/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v1.0.3 h1:cmL5Enob4W83ti/ZHuZLuKD/xqJfus4fVPwE+/BDm+4= github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= -github.com/yosssi/ace v0.0.5/go.mod h1:ALfIzm2vT7t5ZE7uoIZqF3TQ7SAOyupFZnkrF5id+K0= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto 
v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= -golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= -golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc= -golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc= +golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= +golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= -golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= -golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0= -golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= +golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8= +golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= -golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= +golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= -golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= +golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/spec/summary.v1.yaml b/spec/summary.v1.yaml index ac0bfab..b60a6bf 100644 --- a/spec/summary.v1.yaml +++ b/spec/summary.v1.yaml @@ -8,23 +8,23 @@ info: For more information, see the [Getting Started](../docs/quick-start.md) section. 
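Most of the churn at the top of the regenerated spec below is cosmetic: the new bundler emits plain YAML scalars where the old output kept single quotes (see the url: and termsOfService: lines that follow), and the two spellings decode to identical strings. A quick demonstration of that equivalence, using the gopkg.in/yaml.v3 module already listed in go.sum above; the anonymous struct is a throwaway for the demo.

package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	// A single-quoted and a plain YAML scalar decode to the same Go
	// string, so dropping the quotes is a formatting-only change in
	// the bundled output.
	var quoted, plain struct {
		URL string `yaml:"url"`
	}
	if err := yaml.Unmarshal([]byte("url: 'https://www.tidepool.org'"), &quoted); err != nil {
		panic(err)
	}
	if err := yaml.Unmarshal([]byte("url: https://www.tidepool.org"), &plain); err != nil {
		panic(err)
	}
	fmt.Println(quoted.URL == plain.URL) // true
}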
contact: name: Tidepool - url: 'https://www.tidepool.org' + url: https://www.tidepool.org email: support@tidepool.org - termsOfService: 'https://developer.tidepool.org/terms-of-use/' + termsOfService: https://developer.tidepool.org/terms-of-use/ license: name: BSD 2-Clause "Simplified" License - url: 'https://github.com/tidepool-org/summary/blob/master/LICENSE' - x-tidepool-service: 'https://github.com/tidepool-org/summary' + url: https://github.com/tidepool-org/summary/blob/master/LICENSE + x-tidepool-service: https://github.com/tidepool-org/summary servers: - - url: 'https://external.integration.tidepool.org' + - url: https://external.integration.tidepool.org description: integration - - url: 'https://api.tidepool.org' + - url: https://api.tidepool.org description: production - - url: 'https://dev1.dev.tidepool.org' + - url: https://dev1.dev.tidepool.org description: dev1 - - url: 'https://qa1.development.tidepool.org' + - url: https://qa1.development.tidepool.org description: qa1 - - url: 'https://qa2.development.tidepool.org' + - url: https://qa2.development.tidepool.org description: qa2 security: - sessionToken: [] @@ -38,7 +38,7 @@ tags: This API is for a component within the `data` service that calculates summary statistics for user accounts. These routes are mostly executed by the `task` and `clinic-worker` services. paths: - '/v1/summaries/{summaryType}/{userId}': + /v1/summaries/{summaryType}/{userId}: parameters: - $ref: '#/components/parameters/summaryType' - $ref: '#/components/parameters/userId' @@ -50,7 +50,7 @@ paths: content: application/json: schema: - $ref: '#/components/schemas/Summary' + $ref: '#/components/schemas/summary.v5' '400': description: Bad Request '403': @@ -66,1023 +66,49 @@ paths: components: schemas: TidepoolUserId: - type: string - title: Tidepool User ID - pattern: '^([0-9a-f]{10}|[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$' - description: 'String representation of a Tidepool User ID. Old style IDs are 10-digit strings consisting of only hexadeximcal digits. New style IDs are 36-digit [UUID v4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random))' - readOnly: true - x-go-type: string + $ref: '#/components/schemas/tidepooluserid' SummaryTypeSchema: + $ref: '#/components/schemas/summarytype.v5' + Config: + $ref: '#/components/schemas/config.v1' + Dates: + $ref: '#/components/schemas/dates.v5' + GlucoseRange: + $ref: '#/components/schemas/glucoserange.v5' + GlucoseRanges: + $ref: '#/components/schemas/glucoseranges.v5' + ContinuousRanges: + $ref: '#/components/schemas/continuousranges.v5' + GlucosePeriod: + $ref: '#/components/schemas/glucoseperiod.v5' + ContinuousPeriod: + $ref: '#/components/schemas/continuousperiod.v5' + CGMPeriods: + $ref: '#/components/schemas/cgmperiods.v5' + BGMPeriods: + $ref: '#/components/schemas/bgmperiods.v5' + ContinuousPeriods: + $ref: '#/components/schemas/continuousperiods.v5' + Summary: + $ref: '#/components/schemas/summary.v5' + summarytype.v5: type: string title: Summary Type enum: - cgm - bgm - - continuous + - con description: Field which contains a summary type string. 
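One behavioral change is buried in this hunk: the continuous summary type is renamed from continuous to con, so a caller that still requests summaryType=continuous will no longer match the enum. A hedged sketch of how a client might branch on the v5 values; describeSummaryType is a hypothetical helper, not part of the generated client.

package main

import "fmt"

// describeSummaryType branches on the summarytype.v5 enum values
// ("cgm", "bgm", "con"); the label text is illustrative only.
func describeSummaryType(t string) (string, error) {
	switch t {
	case "cgm":
		return "continuous glucose monitor summary", nil
	case "bgm":
		return "blood glucose meter summary", nil
	case "con":
		return "continuous upload (realtime vs. deferred) summary", nil
	default:
		return "", fmt.Errorf("unknown summary type %q", t)
	}
}

func main() {
	desc, err := describeSummaryType("con")
	if err != nil {
		panic(err)
	}
	fmt.Println(desc)
}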
x-go-type: string x-go-type-skip-optional-pointer: true - BGMBucketData: - type: object - title: bgmbucketdata.v1 - description: Series of counters which represent one hour of a users data - properties: - lastRecordTime: - type: string - format: date-time - timeInTargetRecords: - description: Counter of records in target glucose range - type: number - example: 10 - x-go-type: int - timeInHighRecords: - description: Counter of records in high glucose range - type: number - example: 10 - x-go-type: int - timeInVeryHighRecords: - description: Counter of records in very high glucose range - type: number - example: 10 - x-go-type: int - timeInLowRecords: - description: Counter of records in low glucose range - type: number - example: 10 - x-go-type: int - timeInVeryLowRecords: - description: Counter of records in very low glucose range - type: number - example: 10 - x-go-type: int - totalGlucose: - type: number - x-go-type: float64 - description: Total value of all glucose records - example: 1234.56 - required: - - targetRecords - - highRecords - - veryHighRecords - - lowRecords - - veryLowRecords - - totalGlucose - BGMPeriod: - type: object - title: bgmperiod.v1 - description: 'Summary of a specific BGM time period (currently: 1d, 7d, 14d, 30d)' - properties: - hasAverageGlucoseMmol: - type: boolean - averageGlucoseMmol: - description: Average Glucose of records in this period - type: number - example: 5.5 - x-go-type: float64 - averageGlucoseMmolDelta: - description: Difference between the averageGlucose in this period and the other offset version - type: number - example: 2.5 - x-go-type: float64 - hasTimeInTargetPercent: - type: boolean - timeInTargetPercent: - description: Percentage of time spent in target glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInTargetPercentDelta: - description: Difference between the timeInTargetPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInTargetRecords: - type: boolean - timeInTargetRecords: - description: Counter of records in target glucose range - type: number - example: 10 - x-go-type: int - timeInTargetRecordsDelta: - description: Difference between the timeInTargetRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInHighPercent: - type: boolean - timeInHighPercent: - description: Percentage of time spent in high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInHighPercentDelta: - description: Difference between the timeInHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInHighRecords: - type: boolean - timeInHighRecords: - description: Counter of records in high glucose range - type: number - example: 10 - x-go-type: int - timeInHighRecordsDelta: - description: Difference between the timeInHighRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInVeryHighPercent: - type: boolean - timeInVeryHighPercent: - description: Percentage of time spent in very high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInVeryHighPercentDelta: - description: Difference between the timeInVeryHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInVeryHighRecords: - type: boolean - timeInVeryHighRecords: - description: Counter of records in very high glucose range - type: 
number - example: 10 - x-go-type: int - timeInVeryHighRecordsDelta: - description: Difference between the timeInVeryHighRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInExtremeHighPercent: - type: boolean - timeInExtremeHighPercent: - description: Percentage of time spent in extreme high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInExtremeHighPercentDelta: - description: Difference between the timeInExtremeHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInExtremeHighRecords: - type: boolean - timeInExtremeHighRecords: - description: Counter of records in extreme high glucose range - type: number - example: 10 - x-go-type: int - timeInExtremeHighRecordsDelta: - description: Difference between the timeInExtremeHighRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInAnyHighPercent: - type: boolean - timeInAnyHighPercent: - description: Percentage of time spent in Any high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInAnyHighPercentDelta: - description: Difference between the timeInAnyHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInAnyHighRecords: - type: boolean - timeInAnyHighRecords: - description: Counter of records in Any high glucose range - type: number - example: 10 - x-go-type: int - timeInAnyHighRecordsDelta: - description: Difference between the timeInAnyHighRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInLowPercent: - type: boolean - timeInLowPercent: - description: Percentage of time spent in low glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInLowPercentDelta: - description: Difference between the timeInLowPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInLowRecords: - type: boolean - timeInLowRecords: - description: Counter of records in low glucose range - type: number - example: 10 - x-go-type: int - timeInLowRecordsDelta: - description: Difference between the timeInLowRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInVeryLowPercent: - type: boolean - timeInVeryLowPercent: - description: Percentage of time spent in very low glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInVeryLowPercentDelta: - description: Difference between the timeInVeryLowPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInVeryLowRecords: - type: boolean - timeInVeryLowRecords: - description: Counter of records in very low glucose range - type: number - example: 10 - x-go-type: int - timeInVeryLowRecordsDelta: - description: Difference between the timeInVeryLowRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInAnyLowPercent: - type: boolean - timeInAnyLowPercent: - description: Percentage of time spent in Any low glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInAnyLowPercentDelta: - description: Difference between the timeInAnyLowPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInAnyLowRecords: - type: boolean - 
timeInAnyLowRecords: - description: Counter of records in Any low glucose range - type: number - example: 10 - x-go-type: int - timeInAnyLowRecordsDelta: - description: Difference between the timeInAnyLowRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTotalRecords: - type: boolean - totalRecords: - description: Counter of records - type: number - example: 10 - x-go-type: int - totalRecordsDelta: - description: Difference between the totalRecords in this period and version in the opposite offset - type: number - example: 10 - x-go-type: int - hasAverageDailyRecords: - type: boolean - averageDailyRecords: - description: Average daily readings - type: number - example: 3.5 - x-go-type: float64 - averageDailyRecordsDelta: - description: Difference between the averageDailyRecords in this period and version in the opposite offset - type: number - example: 2.5 - x-go-type: float64 - required: - - hasAverageGlucoseMmol - - hasTimeInTargetPercent - - hasTimeInTargetRecords - - hasTimeInHighPercent - - hasTimeInHighRecords - - hasTimeInVeryHighPercent - - hasTimeInVeryHighRecords - - hasTimeInExtremeHighPercent - - hasTimeInExtremeHighRecords - - hasTimeInLowPercent - - hasTimeInLowRecords - - hasTimeInVeryLowPercent - - hasTimeInVeryLowRecords - - hasTimeInAnyLowPercent - - hasTimeInAnyLowRecords - - hasTimeInAnyHighPercent - - hasTimeInAnyHighRecords - - hasTotalRecords - - hasAverageDailyRecords - BGMPeriods: - type: object - title: bgmperiods.v1 - description: A map to each supported BGM summary period - x-go-type-skip-optional-pointer: true - additionalProperties: - $ref: '#/components/schemas/BGMPeriod' - BGMStats: - type: object - title: bgmstats.v1 - description: A summary of a users recent BGM glucose values - properties: - totalHours: - description: Total hours represented in the hourly stats - type: number - x-go-type: int - periods: - $ref: '#/components/schemas/BGMPeriods' - offsetPeriods: - $ref: '#/components/schemas/BGMPeriods' - buckets: - type: array - x-go-type-skip-optional-pointer: true - description: Rotating list containing the stats for each currently tracked hour in order - items: - $ref: '#/components/schemas/Bucket' - required: - - totalHours - CGMBucketData: - type: object - title: cgmbucketdata.v1 - description: Series of counters which represent one hour of a users data - properties: - targetMinutes: - description: Counter of minutes spent in target glucose range - type: number - example: 200 - x-go-type: int - targetRecords: - description: Counter of records in target glucose range - type: number - example: 10 - x-go-type: int - highMinutes: - description: Counter of minutes spent in high glucose range - type: number - example: 200 - x-go-type: int - highRecords: - description: Counter of records in high glucose range - type: number - example: 10 - x-go-type: int - veryHighMinutes: - description: Counter of minutes spent in very high glucose range - type: number - example: 200 - x-go-type: int - veryHighRecords: - description: Counter of records in very high glucose range - type: number - example: 10 - x-go-type: int - lowMinutes: - description: Counter of minutes spent in low glucose range - type: number - example: 200 - x-go-type: int - lowRecords: - description: Counter of records in low glucose range - type: number - example: 10 - x-go-type: int - veryLowMinutes: - description: Counter of minutes spent in very low glucose range - type: number - example: 200 - x-go-type: int - veryLowRecords: - description: 
Counter of records in very low glucose range - type: number - example: 10 - x-go-type: int - totalGlucose: - type: number - x-go-type: float64 - description: Total value of all glucose records - example: 1234.56 - totalMinutes: - description: Counter of minutes using a cgm - type: number - example: 200 - x-go-type: int - totalRecords: - description: Counter of records using a cgm - type: number - example: 10 - x-go-type: int - totalVariance: - type: number - x-go-type: float64 - description: Total variance of all glucose records - example: 1234.56 - required: - - targetMinutes - - targetRecords - - highMinutes - - highRecords - - veryHighMinutes - - veryHighRecords - - lowMinutes - - lowRecords - - veryLowMinutes - - veryLowRecords - - totalGlucose - - totalMinutes - - totalRecords - - totalVariance - CGMPeriod: - type: object - title: cgmperiod.v1 - description: 'Summary of a specific CGM time period (currently: 1d, 7d, 14d, 30d)' - properties: - hasAverageGlucoseMmol: - type: boolean - averageGlucoseMmol: - description: Average Glucose of records in this period - type: number - example: 5.5 - x-go-type: float64 - averageGlucoseMmolDelta: - description: Difference between the averageGlucose in this period and the other offset version - type: number - example: 2.5 - x-go-type: float64 - hasGlucoseManagementIndicator: - type: boolean - glucoseManagementIndicator: - type: number - x-go-type: float64 - description: A derived value which emulates A1C - example: 7.5 - glucoseManagementIndicatorDelta: - type: number - x-go-type: float64 - description: Difference between the glucoseManagementIndicator in this period and the other offset version - example: 7.5 - hasTimeInTargetPercent: - type: boolean - timeInTargetPercent: - description: Percentage of time spent in target glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInTargetPercentDelta: - description: Difference between the timeInTargetPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInTargetMinutes: - type: boolean - timeInTargetMinutes: - description: Counter of minutes spent in target glucose range - type: number - example: 200 - x-go-type: int - timeInTargetMinutesDelta: - description: Difference between the timeInTargetMinutes in this period and version in the opposite offset - type: number - example: 20 - x-go-type: int - hasTimeInTargetRecords: - type: boolean - timeInTargetRecords: - description: Counter of records in target glucose range - type: number - example: 10 - x-go-type: int - timeInTargetRecordsDelta: - description: Difference between the timeInTargetRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInHighPercent: - type: boolean - timeInHighPercent: - description: Percentage of time spent in high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInHighPercentDelta: - description: Difference between the timeInHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInHighMinutes: - type: boolean - timeInHighMinutes: - description: Counter of minutes spent in high glucose range - type: number - example: 200 - x-go-type: int - timeInHighMinutesDelta: - description: Difference between the timeInHighMinutes in this period and version in the opposite offset - type: number - example: 20 - x-go-type: int - hasTimeInHighRecords: - type: boolean - timeInHighRecords: - description: Counter of 
records in high glucose range - type: number - example: 10 - x-go-type: int - timeInHighRecordsDelta: - description: Difference between the timeInHighRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInVeryHighPercent: - type: boolean - timeInVeryHighPercent: - description: Percentage of time spent in very high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInVeryHighPercentDelta: - description: Difference between the timeInVeryHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInVeryHighMinutes: - type: boolean - timeInVeryHighMinutes: - description: Counter of minutes spent in very high glucose range - type: number - example: 200 - x-go-type: int - timeInVeryHighMinutesDelta: - description: Difference between the timeInVeryHighMinutes in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInVeryHighRecords: - type: boolean - timeInVeryHighRecords: - description: Counter of records in very high glucose range - type: number - example: 10 - x-go-type: int - timeInVeryHighRecordsDelta: - description: Difference between the timeInVeryHighRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInExtremeHighPercent: - type: boolean - timeInExtremeHighPercent: - description: Percentage of time spent in extreme high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInExtremeHighPercentDelta: - description: Difference between the timeInExtremeHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInExtremeHighMinutes: - type: boolean - timeInExtremeHighMinutes: - description: Counter of minutes spent in extreme high glucose range - type: number - example: 200 - x-go-type: int - timeInExtremeHighMinutesDelta: - description: Difference between the timeInExtremeHighMinutes in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInExtremeHighRecords: - type: boolean - timeInExtremeHighRecords: - description: Counter of records in extreme high glucose range - type: number - example: 10 - x-go-type: int - timeInExtremeHighRecordsDelta: - description: Difference between the timeInExtremeHighRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInAnyHighPercent: - type: boolean - timeInAnyHighPercent: - description: Percentage of time spent in Any high glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInAnyHighPercentDelta: - description: Difference between the timeInAnyHighPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInAnyHighMinutes: - type: boolean - timeInAnyHighMinutes: - description: Counter of minutes spent in Any high glucose range - type: number - example: 200 - x-go-type: int - timeInAnyHighMinutesDelta: - description: Difference between the timeInAnyHighMinutes in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInAnyHighRecords: - type: boolean - timeInAnyHighRecords: - description: Counter of records in Any high glucose range - type: number - example: 10 - x-go-type: int - timeInAnyHighRecordsDelta: - description: Difference between the timeInAnyHighRecords in this period and version in the opposite 
offset - type: number - example: 5 - x-go-type: int - hasTimeInLowPercent: - type: boolean - timeInLowPercent: - description: Percentage of time spent in low glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInLowPercentDelta: - description: Difference between the timeInLowPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInLowMinutes: - type: boolean - timeInLowMinutes: - description: Counter of minutes spent in low glucose range - type: number - example: 200 - x-go-type: int - timeInLowMinutesDelta: - description: Difference between the timeInLowMinutes in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInLowRecords: - type: boolean - timeInLowRecords: - description: Counter of records in low glucose range - type: number - example: 10 - x-go-type: int - timeInLowRecordsDelta: - description: Difference between the timeInLowRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInVeryLowPercent: - type: boolean - timeInVeryLowPercent: - description: Percentage of time spent in very low glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInVeryLowPercentDelta: - description: Difference between the timeInVeryLowPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInVeryLowMinutes: - type: boolean - timeInVeryLowMinutes: - description: Counter of minutes spent in very low glucose range - type: number - example: 200 - x-go-type: int - timeInVeryLowMinutesDelta: - description: Difference between the timeInVeryLowMinutes in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInVeryLowRecords: - type: boolean - timeInVeryLowRecords: - description: Counter of records in very low glucose range - type: number - example: 10 - x-go-type: int - timeInVeryLowRecordsDelta: - description: Difference between the timeInVeryLowRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInAnyLowPercent: - type: boolean - timeInAnyLowPercent: - description: Percentage of time spent in Any low glucose range - type: number - example: 0.35 - x-go-type: float64 - timeInAnyLowPercentDelta: - description: Difference between the timeInAnyLowPercent in this period and version in the opposite offset - type: number - example: 0.2 - x-go-type: float64 - hasTimeInAnyLowMinutes: - type: boolean - timeInAnyLowMinutes: - description: Counter of minutes spent in Any low glucose range - type: number - example: 200 - x-go-type: int - timeInAnyLowMinutesDelta: - description: Difference between the timeInAnyLowMinutes in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeInAnyLowRecords: - type: boolean - timeInAnyLowRecords: - description: Counter of records in Any low glucose range - type: number - example: 10 - x-go-type: int - timeInAnyLowRecordsDelta: - description: Difference between the timeInAnyLowRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeCGMUsePercent: - type: boolean - timeCGMUsePercent: - description: Percentage of time spent wearing a cgm - type: number - example: 0.35 - x-go-type: float64 - timeCGMUsePercentDelta: - description: Difference between the timeCGMUsePercent in this period and version in the opposite offset - 
type: number - example: 0.2 - x-go-type: float64 - hasTimeCGMUseMinutes: - type: boolean - timeCGMUseMinutes: - description: Counter of minutes spent wearing a cgm - type: number - example: 200 - x-go-type: int - timeCGMUseMinutesDelta: - description: Difference between the timeCGMUseMinutes in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTimeCGMUseRecords: - type: boolean - timeCGMUseRecords: - description: Counter of minutes spent wearing a cgm - type: number - example: 10 - x-go-type: int - timeCGMUseRecordsDelta: - description: Difference between the timeCGMUseRecords in this period and version in the opposite offset - type: number - example: 5 - x-go-type: int - hasTotalRecords: - type: boolean - totalRecords: - description: Counter of records - type: number - example: 10 - x-go-type: int - totalRecordsDelta: - description: Difference between the totalRecords in this period and version in the opposite offset - type: number - example: 10 - x-go-type: int - hasAverageDailyRecords: - type: boolean - averageDailyRecords: - description: Average daily readings - type: number - example: 3.5 - x-go-type: float64 - averageDailyRecordsDelta: - description: Difference between the averageDailyRecords in this period and version in the opposite offset - type: number - example: 2.5 - x-go-type: float64 - standardDeviation: - type: number - example: 2.5 - x-go-type: float64 - standardDeviationDelta: - type: number - example: 2.5 - x-go-type: float64 - coefficientOfVariation: - type: number - example: 2.5 - x-go-type: float64 - coefficientOfVariationDelta: - type: number - example: 2.5 - x-go-type: float64 - hoursWithData: - type: number - example: 2 - x-go-type: int - hoursWithDataDelta: - type: number - example: 2 - x-go-type: int - daysWithData: - type: number - example: 2 - x-go-type: int - daysWithDataDelta: - type: number - example: 2 - x-go-type: int - required: - - hasAverageGlucoseMmol - - hasGlucoseManagementIndicator - - hasTimeInTargetPercent - - hasTimeInTargetMinutes - - hasTimeInTargetRecords - - hasTimeInHighPercent - - hasTimeInHighMinutes - - hasTimeInHighRecords - - hasTimeInVeryHighPercent - - hasTimeInVeryHighMinutes - - hasTimeInVeryHighRecords - - hasTimeInExtremeHighPercent - - hasTimeInExtremeHighMinutes - - hasTimeInExtremeHighRecords - - hasTimeInLowPercent - - hasTimeInLowMinutes - - hasTimeInLowRecords - - hasTimeInVeryLowPercent - - hasTimeInVeryLowMinutes - - hasTimeInVeryLowRecords - - hasTimeInAnyLowPercent - - hasTimeInAnyLowMinutes - - hasTimeInAnyLowRecords - - hasTimeInAnyHighPercent - - hasTimeInAnyHighMinutes - - hasTimeInAnyHighRecords - - hasTimeCGMUsePercent - - hasTimeCGMUseMinutes - - hasTimeCGMUseRecords - - hasTotalRecords - - hasAverageDailyRecords - - standardDeviation - - standardDeviationDelta - - coefficientOfVariation - - coefficientOfVariationDelta - - hoursWithData - - hoursWithDataDelta - - daysWithData - - daysWithDataDelta - CGMPeriods: - type: object - title: cgmperiods.v1 - x-go-type-skip-optional-pointer: true - description: A map to each supported CGM summary period - additionalProperties: - $ref: '#/components/schemas/CGMPeriod' - CGMStats: - type: object - title: cgmstats.v1 - description: A summary of a users recent CGM glucose values - properties: - totalHours: - description: Total hours represented in the hourly stats - type: number - x-go-type: int - periods: - $ref: '#/components/schemas/CGMPeriods' - offsetPeriods: - $ref: '#/components/schemas/CGMPeriods' - buckets: - type: array - 
x-go-type-skip-optional-pointer: true - description: Rotating list containing the stats for each currently tracked hour in order - items: - $ref: '#/components/schemas/Bucket' - required: - - totalHours - ContinuousBucketData: - type: object - title: continuousbucketdata.v1 - description: Series of counters which represent one hour of a users data - properties: - totalRecords: - description: Counter of records from continuous uploads - type: number - example: 10 - x-go-type: int - realtimeRecords: - description: Counter of records uploaded within 24 hours of their timestamp - type: number - example: 10 - x-go-type: int - deferredRecords: - description: Counter of records uploaded later than 24 hours of their timestamp - type: number - example: 10 - x-go-type: int - required: - - totalRecords - - realtimeRecords - - deferredRecords - ContinuousPeriod: - type: object - title: continuousperiod.v1 - description: 'Summary of a specific continuous upload time period (currently: 1d, 7d, 14d, 30d)' - properties: - totalRecords: - type: number - example: 5 - x-go-type: int - averageDailyRecords: - type: number - example: 12.3 - x-go-type: float64 - realtimeRecords: - type: number - example: 5 - x-go-type: int - realtimePercent: - type: number - example: 0.5 - x-go-type: float64 - deferredRecords: - type: number - example: 5 - x-go-type: int - deferredPercent: - type: number - example: 0.5 - x-go-type: float64 - required: - - totalRecords - - averageDailyRecords - - realtimeRecords - - realtimePercent - - deferredRecords - - deferredPercent - ContinuousPeriods: - type: object - title: continuousperiods.v1 - x-go-type-skip-optional-pointer: true - description: A map to each supported CGM summary period - additionalProperties: - $ref: '#/components/schemas/ContinuousPeriod' - ContinuousStats: - type: object - title: continuousstats.v1 - description: A summary of a users recent CGM glucose values - properties: - totalHours: - description: Total hours represented in the hourly stats - type: number - x-go-type: int - periods: - $ref: '#/components/schemas/ContinuousPeriods' - offsetPeriods: - $ref: '#/components/schemas/ContinuousPeriods' - buckets: - type: array - x-go-type-skip-optional-pointer: true - description: Rotating list containing the stats for each currently tracked hour in order - items: - $ref: '#/components/schemas/Bucket' - required: - - totalHours - Bucket: - type: object - title: bucket.v1 - description: bucket containing an hour of bgm or cgm aggregations - properties: - date: - type: string - format: date-time - lastRecordTime: - type: string - format: date-time - data: - oneOf: - - $ref: '#/components/schemas/BGMBucketData' - - $ref: '#/components/schemas/CGMBucketData' - - $ref: '#/components/schemas/ContinuousBucketData' - required: - - date - - lateRecordTime - Config: + tidepooluserid: + type: string + title: Tidepool User ID + pattern: ^([0-9a-f]{10}|[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$ + description: String representation of a Tidepool User ID. Old style IDs are 10-digit strings consisting of only hexadecimal digits.
New style IDs are 36-digit [UUID v4](https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_(random)) + readOnly: true + x-go-type: string + config.v1: type: object title: config.v1 x-go-type-skip-optional-pointer: true @@ -1114,9 +140,9 @@ components: - veryHighGlucoseThreshold - lowGlucoseThreshold - veryLowGlucoseThreshold - Dates: + dates.v5: type: object - title: dates.v1 + title: dates.v5 x-go-type-skip-optional-pointer: true description: dates tracked for summary calculation properties: @@ -1129,32 +155,20 @@ components: type: array items: type: string - hasFirstData: - type: boolean firstData: description: Date of the first included value type: string format: date-time - hasLastData: - type: boolean lastData: description: Date of the last calculated value type: string format: date-time - hasLastUploadDate: - type: boolean lastUploadDate: description: Created date of the last calculated value type: string format: date-time - hasOutdatedSince: - type: boolean outdatedSince: - description: 'Date of the first user upload after lastData, removed when calculated' - type: string - format: date-time - outdatedSinceLimit: - description: Upper limit of the OutdatedSince value to prevent infinite queue duration + description: Date of the first user upload after lastData, removed when calculated type: string format: date-time outdatedReason: @@ -1163,34 +177,190 @@ components: items: type: string required: - - hasFirstData - - hasLastData - - hasLastUploadDate - - hasOutdatedSince - Summary: + - firstData + - lastData + - lastUpdatedReason + - lastUpdatedDate + - lastUploadDate + - outdatedReason + summarybase.v5: type: object - title: summary.v1 - description: A summary of a users recent data + title: summarybase.v5 + description: shared properties of all summaries properties: type: - $ref: '#/components/schemas/SummaryTypeSchema' + $ref: '#/components/schemas/summarytype.v5' userId: - $ref: '#/components/schemas/TidepoolUserId' + $ref: '#/components/schemas/tidepooluserid' config: - $ref: '#/components/schemas/Config' + $ref: '#/components/schemas/config.v1' dates: - $ref: '#/components/schemas/Dates' - stats: - oneOf: - - $ref: '#/components/schemas/CGMStats' - - $ref: '#/components/schemas/BGMStats' - - $ref: '#/components/schemas/ContinuousStats' - updateWithoutChangeCount: + $ref: '#/components/schemas/dates.v5' + glucoserange.v5: + type: object + title: glucoserange.v5 + description: Metrics for a particular range of glucose values + properties: + glucose: + description: total of glucose values + type: number + example: 55.5 + x-go-type: float64 + minutes: + description: total of minutes represented + type: number + example: 55 + x-go-type: int + records: + description: total records type: number example: 5 x-go-type: int + percent: + description: percent of all ranges this range represents, or total CGM use in a Total range + type: number + example: 0.33 + x-go-type: float64 + variance: + description: total variance of the values in this range + type: number + example: 15.5 + x-go-type: float64 + x-go-type-skip-optional-pointer: true + required: + - glucose + - minutes + - records + - percent + - variance + glucoseranges.v5: + type: object + title: glucoseranges.v5 + description: glucose ranges + properties: + total: + $ref: '#/components/schemas/glucoserange.v5' + inVeryLow: + $ref: '#/components/schemas/glucoserange.v5' + inLow: + $ref: '#/components/schemas/glucoserange.v5' + inTarget: + $ref: '#/components/schemas/glucoserange.v5' + inHigh: + $ref: 
'#/components/schemas/glucoserange.v5' + inVeryHigh: + $ref: '#/components/schemas/glucoserange.v5' + inExtremeHigh: + $ref: '#/components/schemas/glucoserange.v5' + inAnyLow: + $ref: '#/components/schemas/glucoserange.v5' + inAnyHigh: + $ref: '#/components/schemas/glucoserange.v5' + glucoseperiod.v5: + type: object + title: glucoseperiod.v5 + description: 'Summary of a specific glucose (CGM or BGM) time period (currently: 1d, 7d, 14d, 30d)' + allOf: + - $ref: '#/components/schemas/glucoseranges.v5' + - type: object + properties: + hoursWithData: + type: number + example: 2 + x-go-type: int + daysWithData: + type: number + example: 2 + x-go-type: int + averageGlucoseMmol: + description: Average Glucose of records in this period + type: number + example: 5.5 + x-go-type: float64 + glucoseManagementIndicator: + type: number + x-go-type: float64 + description: A derived value which emulates A1C + example: 7.5 + coefficientOfVariation: + type: number + example: 2.5 + x-go-type: float64 + standardDeviation: + type: number + example: 2.5 + x-go-type: float64 + averageDailyRecords: + description: Average daily readings + type: number + example: 3.5 + x-go-type: float64 + required: + - hoursWithData + - daysWithData + - glucoseManagementIndicator + - coefficientOfVariation + - standardDeviation + - averageDailyRecords + - averageGlucoseMmol + cgmperiods.v5: + type: object + title: cgmperiods.v5 + description: A map to each supported CGM summary period + additionalProperties: + $ref: '#/components/schemas/glucoseperiod.v5' + bgmperiods.v5: + type: object + title: bgmperiods.v5 + description: A map to each supported BGM summary period + additionalProperties: + $ref: '#/components/schemas/glucoseperiod.v5' + continuousranges.v5: + type: object + title: continuousranges.v5 + description: continuous ranges + properties: + realtime: + $ref: '#/components/schemas/glucoserange.v5' + deferred: + $ref: '#/components/schemas/glucoserange.v5' + total: + $ref: '#/components/schemas/glucoserange.v5' + continuousperiod.v5: + type: object + title: continuousperiod.v5 + description: 'Summary of a specific continuous upload time period (currently: 1d, 7d, 14d, 30d)' + allOf: + - $ref: '#/components/schemas/continuousranges.v5' + - type: object + properties: + averageDailyRecords: + description: Average daily readings + type: number + example: 3.5 + x-go-type: float64 required: - - updateWithoutChangeCount + - averageDailyRecords + continuousperiods.v5: + type: object + title: continuousperiods.v5 + description: A map to each supported Continuous summary period + additionalProperties: + $ref: '#/components/schemas/continuousperiod.v5' + summary.v5: + type: object + title: summary.v5 + description: A summary of a user's recent data + allOf: + - $ref: '#/components/schemas/summarybase.v5' + - type: object + properties: + periods: + oneOf: + - $ref: '#/components/schemas/cgmperiods.v5' + - $ref: '#/components/schemas/bgmperiods.v5' + - $ref: '#/components/schemas/continuousperiods.v5' securitySchemes: sessionToken: description: Tidepool Session Token @@ -1208,10 +378,10 @@ components: in: path required: true schema: - $ref: '#/components/schemas/SummaryTypeSchema' + $ref: '#/components/schemas/summarytype.v5' userId: name: userId in: path required: true schema: - $ref: '#/components/schemas/TidepoolUserId' + $ref: '#/components/schemas/tidepooluserid' diff --git a/vendor/github.com/IBM/sarama/.golangci.yml b/vendor/github.com/IBM/sarama/.golangci.yml index 72e3e4c..2e02940 100644 --- a/vendor/github.com/IBM/sarama/.golangci.yml +++
b/vendor/github.com/IBM/sarama/.golangci.yml @@ -1,4 +1,5 @@ run: + go: "1.20" timeout: 5m deadline: 10m @@ -57,7 +58,7 @@ linters: enable: - bodyclose - depguard - - exportloopref + # - copyloopvar - dogsled - errcheck - errorlint @@ -79,6 +80,7 @@ linters: issues: exclude: + - "G115: integer overflow conversion" - "G404: Use of weak random number generator" exclude-rules: # exclude some linters from running on certains files. diff --git a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml index 1869b81..1e64cc0 100644 --- a/vendor/github.com/IBM/sarama/.pre-commit-config.yaml +++ b/vendor/github.com/IBM/sarama/.pre-commit-config.yaml @@ -2,7 +2,7 @@ fail_fast: false default_install_hook_types: [pre-commit, commit-msg] repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.4.0 + rev: v5.0.0 hooks: - id: check-merge-conflict - id: check-yaml @@ -32,10 +32,10 @@ repos: files: \.go$ args: [] - repo: https://github.com/gitleaks/gitleaks - rev: v8.16.3 + rev: v8.21.2 hooks: - id: gitleaks - repo: https://github.com/golangci/golangci-lint - rev: v1.52.2 + rev: v1.61.0 hooks: - id: golangci-lint diff --git a/vendor/github.com/IBM/sarama/Dockerfile.kafka b/vendor/github.com/IBM/sarama/Dockerfile.kafka index ac2d47a..d2234e3 100644 --- a/vendor/github.com/IBM/sarama/Dockerfile.kafka +++ b/vendor/github.com/IBM/sarama/Dockerfile.kafka @@ -1,42 +1,64 @@ -FROM registry.access.redhat.com/ubi8/ubi-minimal:8.9@sha256:f30dbf77b075215f6c827c269c073b5e0973e5cea8dacdf7ecb6a19c868f37f2 +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.5@sha256:daa61d6103e98bccf40d7a69a0d4f8786ec390e2204fd94f7cc49053e9949360 USER root RUN microdnf update -y \ - && microdnf install -y curl gzip java-11-openjdk-headless tar tzdata-java \ + && microdnf install -y git gzip java-17-openjdk-headless tar tzdata-java \ && microdnf reinstall -y tzdata \ && microdnf clean all -ENV JAVA_HOME=/usr/lib/jvm/jre-11 +ENV JAVA_HOME=/usr/lib/jvm/jre-17 # https://docs.oracle.com/javase/7/docs/technotes/guides/net/properties.html # Ensure Java doesn't cache any dns results -RUN cd /etc/java/java-11-openjdk/*/conf/security \ +RUN cd /etc/java/java-17-openjdk/*/conf/security \ && sed -e '/networkaddress.cache.ttl/d' -e '/networkaddress.cache.negative.ttl/d' -i java.security \ && echo 'networkaddress.cache.ttl=0' >> java.security \ && echo 'networkaddress.cache.negative.ttl=0' >> java.security ARG SCALA_VERSION="2.13" -ARG KAFKA_VERSION="3.6.0" +ARG KAFKA_VERSION="3.6.2" -# https://github.com/apache/kafka/blob/9989b68d0d38c8f1357f78bf9d53a58c1476188d/tests/docker/Dockerfile#L46-L72 +WORKDIR /tmp + +# https://github.com/apache/kafka/blob/2e2b0a58eda3e677763af974a44a6aaa3c280214/tests/docker/Dockerfile#L77-L105 ARG KAFKA_MIRROR="https://s3-us-west-2.amazonaws.com/kafka-packages" SHELL ["/bin/bash", "-o", "pipefail", "-c"] -RUN mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ +RUN --mount=type=bind,target=.,rw=true \ + mkdir -p "/opt/kafka-${KAFKA_VERSION}" \ && chmod a+rw "/opt/kafka-${KAFKA_VERSION}" \ - && curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" + && if [ "$KAFKA_VERSION" = "4.0.0" ]; then \ + microdnf install -y java-17-openjdk-devel \ + && git clone --depth=50 --single-branch -b 4.0 https://github.com/apache/kafka /usr/src/kafka \ + && cd /usr/src/kafka \ + && : PIN TO COMMIT BEFORE KAFKA-17616 ZOOKEEPER REMOVAL STARTED \ + && git reset --hard d1504649fb \ + && export 
JAVA_TOOL_OPTIONS=-XX:MaxRAMPercentage=80 \ + && sed -e '/version=/s/-SNAPSHOT//' -e '/org.gradle.jvmargs/d' -e '/org.gradle.parallel/s/true/false/' -i gradle.properties && ./gradlew -PmaxParallelForks=1 -PmaxScalacThreads=1 --no-daemon releaseTarGz -x siteDocsTar -x javadoc \ + && tar xzf core/build/distributions/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" \ + && cp /tmp/server.properties "/opt/kafka-${KAFKA_VERSION}/config/" \ + && microdnf remove -y java-17-openjdk-devel \ + && rm -rf /usr/src/kafka ; \ + else \ + curl -s "$KAFKA_MIRROR/kafka_${SCALA_VERSION}-${KAFKA_VERSION}.tgz" | tar xz --strip-components=1 -C "/opt/kafka-${KAFKA_VERSION}" ; \ + fi # older kafka versions depend upon jaxb-api being bundled with the JDK, but it # was removed from Java 11 so work around that by including it in the kafka # libs dir regardless -WORKDIR /tmp RUN curl -sLO "https://repo1.maven.org/maven2/javax/xml/bind/jaxb-api/2.3.0/jaxb-api-2.3.0.jar" \ && for DIR in /opt/kafka-*; do cp -v jaxb-api-2.3.0.jar $DIR/libs/ ; done \ && rm -f jaxb-api-2.3.0.jar +# older kafka versions with the zookeeper 3.4.13 client aren't compatible with Java 17 so quietly bump them to 3.5.9 +RUN [ -f "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.13.jar" ] || exit 0 ; \ + rm -f "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.4.13.jar" \ + && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper/3.5.9/zookeeper-3.5.9.jar" \ + && curl --fail -sSL -o "/opt/kafka-${KAFKA_VERSION}/libs/zookeeper-jute-3.5.9.jar" "https://repo1.maven.org/maven2/org/apache/zookeeper/zookeeper-jute/3.5.9/zookeeper-jute-3.5.9.jar" + WORKDIR /opt/kafka-${KAFKA_VERSION} -ENV JAVA_MAJOR_VERSION=11 +ENV JAVA_MAJOR_VERSION=17 RUN sed -e "s/JAVA_MAJOR_VERSION=.*/JAVA_MAJOR_VERSION=${JAVA_MAJOR_VERSION}/" -i"" ./bin/kafka-run-class.sh diff --git a/vendor/github.com/IBM/sarama/Makefile b/vendor/github.com/IBM/sarama/Makefile index 7cefc2a..ba6f46e 100644 --- a/vendor/github.com/IBM/sarama/Makefile +++ b/vendor/github.com/IBM/sarama/Makefile @@ -9,7 +9,7 @@ FILES := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name TESTS := $(shell find . -name '*.go' -type f -not -name '*.pb.go' -not -name '*_generated.go' -name '*_test.go') $(GOBIN)/tparse: - GOBIN=$(GOBIN) go install github.com/mfridman/tparse@v0.11.1 + GOBIN=$(GOBIN) go install github.com/mfridman/tparse@v0.16.0 get: $(GO) get ./... $(GO) mod verify diff --git a/vendor/github.com/IBM/sarama/admin.go b/vendor/github.com/IBM/sarama/admin.go index dcf1d76..8aa1f37 100644 --- a/vendor/github.com/IBM/sarama/admin.go +++ b/vendor/github.com/IBM/sarama/admin.go @@ -3,6 +3,7 @@ package sarama import ( "errors" "fmt" + "io" "math/rand" "strconv" "sync" @@ -99,6 +100,9 @@ type ClusterAdmin interface { // This operation is supported by brokers with version 0.11.0.0 or higher. DeleteACL(filter AclFilter, validateOnly bool) ([]MatchingAcl, error) + // ElectLeaders allows to trigger the election of preferred leaders for a set of partitions. + ElectLeaders(ElectionType, map[string][]int32) (map[string]map[int32]*PartitionResult, error) + // List the consumer groups available in the cluster. ListConsumerGroups() (map[string]string, error) @@ -141,6 +145,10 @@ type ClusterAdmin interface { // locally cached value if it's available. Controller() (*Broker, error) + // Coordinator returns the coordinating broker for a consumer group. 
It will + // return a locally cached value if it's available. + Coordinator(group string) (*Broker, error) + // Remove members from the consumer group by given member identities. // This operation is supported by brokers with version 2.3 or higher // This is for static membership feature. KIP-345 @@ -192,14 +200,25 @@ func (ca *clusterAdmin) Controller() (*Broker, error) { return ca.client.Controller() } +func (ca *clusterAdmin) Coordinator(group string) (*Broker, error) { + return ca.client.Coordinator(group) +} + func (ca *clusterAdmin) refreshController() (*Broker, error) { return ca.client.RefreshController() } -// isErrNotController returns `true` if the given error type unwraps to an -// `ErrNotController` response from Kafka -func isErrNotController(err error) bool { - return errors.Is(err, ErrNotController) +// isRetriableControllerError returns `true` if the given error type unwraps to +// an `ErrNotController` or `EOF` response from Kafka +func isRetriableControllerError(err error) bool { + return errors.Is(err, ErrNotController) || errors.Is(err, io.EOF) +} + +// isRetriableGroupCoordinatorError returns `true` if the given error type +// unwraps to an `ErrNotCoordinatorForConsumer`, +// `ErrConsumerCoordinatorNotAvailable` or `EOF` response from Kafka +func isRetriableGroupCoordinatorError(err error) bool { + return errors.Is(err, ErrNotCoordinatorForConsumer) || errors.Is(err, ErrConsumerCoordinatorNotAvailable) || errors.Is(err, io.EOF) } // retryOnError will repeatedly call the given (error-returning) func in the @@ -249,7 +268,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -266,7 +285,7 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO } if !errors.Is(topicErr.Err, ErrNoError) { - if errors.Is(topicErr.Err, ErrNotController) { + if isRetriableControllerError(topicErr.Err) { _, _ = ca.refreshController() } return topicErr @@ -278,14 +297,14 @@ func (ca *clusterAdmin) CreateTopic(topic string, detail *TopicDetail, validateO func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetadata, err error) { var response *MetadataResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { controller, err := ca.Controller() if err != nil { return err } request := NewMetadataRequest(ca.conf.Version, topics) response, err = controller.GetMetadata(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -298,7 +317,7 @@ func (ca *clusterAdmin) DescribeTopics(topics []string) (metadata []*TopicMetada func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32, err error) { var response *MetadataResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { controller, err := ca.Controller() if err != nil { return err @@ -306,7 +325,7 @@ func (ca *clusterAdmin) DescribeCluster() (brokers []*Broker, controllerID int32 request := NewMetadataRequest(ca.conf.Version, nil) response, err = controller.GetMetadata(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -438,7 +457,7 @@ func (ca 
*clusterAdmin) DeleteTopic(topic string) error { request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -482,7 +501,7 @@ func (ca *clusterAdmin) CreatePartitions(topic string, count int32, assignment [ request.Version = 1 } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -523,7 +542,7 @@ func (ca *clusterAdmin) AlterPartitionReassignments(topic string, assignment [][ request.AddBlock(topic, int32(i), assignment[i]) } - return ca.retryOnError(isErrNotController, func() error { + return ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -570,7 +589,7 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in request.AddBlock(topic, partitions) var rsp *ListPartitionReassignmentsResponse - err = ca.retryOnError(isErrNotController, func() error { + err = ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -578,7 +597,7 @@ func (ca *clusterAdmin) ListPartitionReassignments(topic string, partitions []in _ = b.Open(ca.client.Config()) rsp, err = b.ListPartitionReassignments(request) - if isErrNotController(err) { + if isRetriableControllerError(err) { _, _ = ca.refreshController() } return err @@ -907,15 +926,53 @@ func (ca *clusterAdmin) DeleteACL(filter AclFilter, validateOnly bool) ([]Matchi return mAcls, nil } +func (ca *clusterAdmin) ElectLeaders(electionType ElectionType, partitions map[string][]int32) (map[string]map[int32]*PartitionResult, error) { + request := &ElectLeadersRequest{ + Type: electionType, + TopicPartitions: partitions, + TimeoutMs: int32(60000), + } + + if ca.conf.Version.IsAtLeast(V2_4_0_0) { + request.Version = 2 + } else if ca.conf.Version.IsAtLeast(V0_11_0_0) { + request.Version = 1 + } + + var res *ElectLeadersResponse + if err := ca.retryOnError(isRetriableControllerError, func() error { + b, err := ca.Controller() + if err != nil { + return err + } + _ = b.Open(ca.client.Config()) + + res, err = b.ElectLeaders(request) + if err != nil { + return err + } + if !errors.Is(res.ErrorCode, ErrNoError) { + if isRetriableControllerError(res.ErrorCode) { + _, _ = ca.refreshController() + } + return res.ErrorCode + } + return nil + }); err != nil { + return nil, err + } + return res.ReplicaElectionResults, nil +} + func (ca *clusterAdmin) DescribeConsumerGroups(groups []string) (result []*GroupDescription, err error) { groupsPerBroker := make(map[*Broker][]string) for _, group := range groups { - controller, err := ca.client.Coordinator(group) + coordinator, err := ca.client.Coordinator(group) if err != nil { return nil, err } - groupsPerBroker[controller] = append(groupsPerBroker[controller], group) + groupsPerBroker[coordinator] = append(groupsPerBroker[coordinator], group) } for broker, brokerGroups := range groupsPerBroker { @@ -1007,22 +1064,36 @@ func (ca *clusterAdmin) ListConsumerGroups() (allGroups map[string]string, err e } func (ca *clusterAdmin) ListConsumerGroupOffsets(group string, topicPartitions map[string][]int32) (*OffsetFetchResponse, error) { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return nil, err - } - + var response *OffsetFetchResponse request := NewOffsetFetchRequest(ca.conf.Version, group, 
topicPartitions) + err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() + + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + response, err = coordinator.FetchOffset(request) + if err != nil { + return err + } + if !errors.Is(response.Err, ErrNoError) { + return response.Err + } - return coordinator.FetchOffset(request) + return nil + }) + + return response, err } func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, partition int32) error { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return err - } - + var response *DeleteOffsetsResponse request := &DeleteOffsetsRequest{ Group: group, partitions: map[string][]int32{ @@ -1030,27 +1101,35 @@ func (ca *clusterAdmin) DeleteConsumerGroupOffset(group string, topic string, pa }, } - resp, err := coordinator.DeleteOffsets(request) - if err != nil { - return err - } + return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() - if !errors.Is(resp.ErrorCode, ErrNoError) { - return resp.ErrorCode - } + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } - if !errors.Is(resp.Errors[topic][partition], ErrNoError) { - return resp.Errors[topic][partition] - } - return nil + response, err = coordinator.DeleteOffsets(request) + if err != nil { + return err + } + if !errors.Is(response.ErrorCode, ErrNoError) { + return response.ErrorCode + } + if !errors.Is(response.Errors[topic][partition], ErrNoError) { + return response.Errors[topic][partition] + } + + return nil + }) } func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { - coordinator, err := ca.client.Coordinator(group) - if err != nil { - return err - } - + var response *DeleteGroupsResponse request := &DeleteGroupsRequest{ Groups: []string{group}, } @@ -1058,21 +1137,34 @@ func (ca *clusterAdmin) DeleteConsumerGroup(group string) error { request.Version = 1 } - resp, err := coordinator.DeleteGroups(request) - if err != nil { - return err - } + return ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() - groupErr, ok := resp.GroupErrorCodes[group] - if !ok { - return ErrIncompleteResponse - } + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } - if !errors.Is(groupErr, ErrNoError) { - return groupErr - } + response, err = coordinator.DeleteGroups(request) + if err != nil { + return err + } - return nil + groupErr, ok := response.GroupErrorCodes[group] + if !ok { + return ErrIncompleteResponse + } + + if !errors.Is(groupErr, ErrNoError) { + return groupErr + } + + return nil + }) } func (ca *clusterAdmin) DescribeLogDirs(brokerIds []int32) (allLogDirs map[int32][]DescribeLogDirsResponseDirMetadata, err error) { @@ -1170,7 +1262,7 @@ func (ca *clusterAdmin) AlterUserScramCredentials(u []AlterUserScramCredentialsU } var rsp *AlterUserScramCredentialsResponse - err := ca.retryOnError(isErrNotController, func() error { + err := ca.retryOnError(isRetriableControllerError, func() error { b, err := ca.Controller() if err != nil { return err @@ -1248,18 +1340,14 @@ func (ca *clusterAdmin) 
AlterClientQuotas(entity []QuotaEntityComponent, op Clie return nil } -func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInstanceIds []string) (*LeaveGroupResponse, error) { +func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(group string, groupInstanceIds []string) (*LeaveGroupResponse, error) { if !ca.conf.Version.IsAtLeast(V2_4_0_0) { return nil, ConfigurationError("Removing members from a consumer group requires Kafka version of at least v2.4.0") } - - controller, err := ca.client.Coordinator(groupId) - if err != nil { - return nil, err - } + var response *LeaveGroupResponse request := &LeaveGroupRequest{ Version: 3, - GroupId: groupId, + GroupId: group, } for _, instanceId := range groupInstanceIds { groupInstanceId := instanceId @@ -1267,5 +1355,28 @@ func (ca *clusterAdmin) RemoveMemberFromConsumerGroup(groupId string, groupInsta GroupInstanceId: &groupInstanceId, }) } - return controller.LeaveGroup(request) + err := ca.retryOnError(isRetriableGroupCoordinatorError, func() (err error) { + defer func() { + if err != nil && isRetriableGroupCoordinatorError(err) { + _ = ca.client.RefreshCoordinator(group) + } + }() + + coordinator, err := ca.client.Coordinator(group) + if err != nil { + return err + } + + response, err = coordinator.LeaveGroup(request) + if err != nil { + return err + } + if !errors.Is(response.Err, ErrNoError) { + return response.Err + } + + return nil + }) + + return response, err } diff --git a/vendor/github.com/IBM/sarama/async_producer.go b/vendor/github.com/IBM/sarama/async_producer.go index f629a6a..5f25752 100644 --- a/vendor/github.com/IBM/sarama/async_producer.go +++ b/vendor/github.com/IBM/sarama/async_producer.go @@ -13,6 +13,13 @@ import ( "github.com/rcrowley/go-metrics" ) +// ErrProducerRetryBufferOverflow is returned when the bridging retry buffer is full and OOM prevention needs to be applied. +var ErrProducerRetryBufferOverflow = errors.New("retry buffer full: message discarded to prevent buffer overflow") + +// minFunctionalRetryBufferLength is the lower limit of Producer.Retry.MaxBufferLength for it to function. +// Any non-zero maxBufferLength less than this lower limit is pushed to the lower limit. +const minFunctionalRetryBufferLength = 4 * 1024 + // AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages // to the correct broker for the provided topic-partition, refreshing metadata as appropriate, // and parses responses for errors. 
You must read from the Errors() channel or the @@ -1101,7 +1108,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo bp.parent.returnSuccesses(pSet.msgs) // Retriable errors case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: if bp.parent.conf.Producer.Retry.Max <= 0 { bp.parent.abandonBrokerConnection(bp.broker) bp.parent.returnErrors(pSet.msgs, block.Err) @@ -1134,7 +1141,7 @@ func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceRespo switch block.Err { case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, - ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend, ErrKafkaStorageError: Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", bp.broker.ID(), topic, partition, block.Err) if bp.currentRetries[topic] == nil { @@ -1207,6 +1214,11 @@ func (bp *brokerProducer) handleError(sent *produceSet, err error) { // effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock // based on https://godoc.org/github.com/eapache/channels#InfiniteChannel func (p *asyncProducer) retryHandler() { + maxBufferSize := p.conf.Producer.Retry.MaxBufferLength + if 0 < maxBufferSize && maxBufferSize < minFunctionalRetryBufferLength { + maxBufferSize = minFunctionalRetryBufferLength + } + var msg *ProducerMessage buf := queue.New() @@ -1227,6 +1239,19 @@ func (p *asyncProducer) retryHandler() { } buf.Add(msg) + + if maxBufferSize > 0 && buf.Length() >= maxBufferSize { + msgToHandle := buf.Peek().(*ProducerMessage) + if msgToHandle.flags == 0 { + select { + case p.input <- msgToHandle: + buf.Remove() + default: + buf.Remove() + p.returnError(msgToHandle, ErrProducerRetryBufferOverflow) + } + } + } } } diff --git a/vendor/github.com/IBM/sarama/balance_strategy.go b/vendor/github.com/IBM/sarama/balance_strategy.go index 30d4177..b5bc30a 100644 --- a/vendor/github.com/IBM/sarama/balance_strategy.go +++ b/vendor/github.com/IBM/sarama/balance_strategy.go @@ -989,6 +989,7 @@ func (p *partitionMovements) getTheActualPartitionToBeMoved(partition topicParti return reversePairPartition } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, currentPath []string) ([]string, bool) { if src == dst { return currentPath, false @@ -1023,6 +1024,7 @@ func (p *partitionMovements) isLinked(src, dst string, pairs []consumerPair, cur return currentPath, false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { superCycle := make([]string, len(cycle)-1) for i := 0; i < len(cycle)-1; i++ { @@ -1037,6 +1039,7 @@ func (p *partitionMovements) in(cycle []string, cycles [][]string) bool { return false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) hasCycles(pairs []consumerPair) bool { cycles := make([][]string, 0) for _, pair := range pairs { @@ -1068,6 +1071,7 @@ func (p 
*partitionMovements) hasCycles(pairs []consumerPair) bool { return false } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func (p *partitionMovements) isSticky() bool { for topic, movements := range p.PartitionMovementsByTopic { movementPairs := make([]consumerPair, len(movements)) @@ -1085,6 +1089,7 @@ func (p *partitionMovements) isSticky() bool { return true } +//nolint:unused // this is used but only in unittests as a helper (which are excluded by the integration build tag) func indexOfSubList(source []string, target []string) int { targetSize := len(target) maxCandidate := len(source) - targetSize diff --git a/vendor/github.com/IBM/sarama/broker.go b/vendor/github.com/IBM/sarama/broker.go index d0d5b87..c4f1005 100644 --- a/vendor/github.com/IBM/sarama/broker.go +++ b/vendor/github.com/IBM/sarama/broker.go @@ -243,9 +243,9 @@ func (b *Broker) Open(conf *Config) error { if b.connErr != nil { err = b.conn.Close() if err == nil { - DebugLogger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s due to SASL v0 auth error: %s\n", b.addr, b.connErr) } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + Logger.Printf("Error while closing connection to broker %s (due to SASL v0 auth error: %s): %s\n", b.addr, b.connErr, err) } b.conn = nil atomic.StoreInt32(&b.opened, 0) @@ -264,9 +264,9 @@ func (b *Broker) Open(conf *Config) error { <-b.done err = b.conn.Close() if err == nil { - DebugLogger.Printf("Closed connection to broker %s\n", b.addr) + DebugLogger.Printf("Closed connection to broker %s due to SASL v1 auth error: %s\n", b.addr, b.connErr) } else { - Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + Logger.Printf("Error while closing connection to broker %s (due to SASL v1 auth error: %s): %s\n", b.addr, b.connErr, err) } b.conn = nil atomic.StoreInt32(&b.opened, 0) @@ -689,6 +689,18 @@ func (b *Broker) ListPartitionReassignments(request *ListPartitionReassignmentsR return response, nil } +// ElectLeaders sends an elect leaders request and returns the partition election results +func (b *Broker) ElectLeaders(request *ElectLeadersRequest) (*ElectLeadersResponse, error) { + response := new(ElectLeadersResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + // DeleteRecords send a request to delete records and return delete record // response or error func (b *Broker) DeleteRecords(request *DeleteRecordsRequest) (*DeleteRecordsResponse, error) { @@ -1242,12 +1254,12 @@ func (b *Broker) authenticateViaSASLv1() error { handshakeErr := b.sendInternal(handshakeRequest, prom) if handshakeErr != nil { - Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr) return handshakeErr } handshakeErr = handleResponsePromise(handshakeRequest, handshakeResponse, prom, metricRegistry) if handshakeErr != nil { - Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + Logger.Printf("Error while handling SASL handshake response %s: %s\n", b.addr, handshakeErr) return handshakeErr } @@ -1267,7 +1279,7 @@ func (b *Broker) authenticateViaSASLv1() error { } authErr = handleResponsePromise(authenticateRequest, authenticateResponse, prom, metricRegistry) if authErr != nil { - Logger.Printf("Error while performing SASL Auth %s\n", b.addr) + 
Logger.Printf("Error while performing SASL Auth %s: %s\n", b.addr, authErr) return nil, authErr } @@ -1385,7 +1397,7 @@ func (b *Broker) sendAndReceiveSASLPlainAuthV0() error { if b.conf.Net.SASL.Handshake { handshakeErr := b.sendAndReceiveSASLHandshake(SASLTypePlaintext, b.conf.Net.SASL.Version) if handshakeErr != nil { - Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + Logger.Printf("Error while performing SASL handshake %s: %s\n", b.addr, handshakeErr) return handshakeErr } } @@ -1426,9 +1438,6 @@ func (b *Broker) sendAndReceiveSASLPlainAuthV0() error { func (b *Broker) sendAndReceiveSASLPlainAuthV1(authSendReceiver func(authBytes []byte) (*SaslAuthenticateResponse, error)) error { authBytes := []byte(b.conf.Net.SASL.AuthIdentity + "\x00" + b.conf.Net.SASL.User + "\x00" + b.conf.Net.SASL.Password) _, err := authSendReceiver(authBytes) - if err != nil { - return err - } return err } diff --git a/vendor/github.com/IBM/sarama/client.go b/vendor/github.com/IBM/sarama/client.go index 2decba7..5c54b44 100644 --- a/vendor/github.com/IBM/sarama/client.go +++ b/vendor/github.com/IBM/sarama/client.go @@ -363,34 +363,19 @@ func (client *client) MetadataTopics() ([]string, error) { } func (client *client) Partitions(topic string) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - partitions := client.cachedPartitions(topic, allPartitions) - - if len(partitions) == 0 { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - partitions = client.cachedPartitions(topic, allPartitions) - } - - // no partitions found after refresh metadata - if len(partitions) == 0 { - return nil, ErrUnknownTopicOrPartition - } - - return partitions, nil + return client.getPartitions(topic, allPartitions) } func (client *client) WritablePartitions(topic string) ([]int32, error) { + return client.getPartitions(topic, writablePartitions) +} + +func (client *client) getPartitions(topic string, pt partitionType) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } - partitions := client.cachedPartitions(topic, writablePartitions) + partitions := client.cachedPartitions(topic, pt) // len==0 catches when it's nil (no such topic) and the odd case when every single // partition is undergoing leader election simultaneously. 
Callers have to be able to handle @@ -403,7 +388,7 @@ func (client *client) WritablePartitions(topic string) ([]int32, error) { if err != nil { return nil, err } - partitions = client.cachedPartitions(topic, writablePartitions) + partitions = client.cachedPartitions(topic, pt) } if partitions == nil { @@ -414,56 +399,24 @@ func (client *client) WritablePartitions(topic string) ([]int32, error) { } func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if errors.Is(metadata.Err, ErrReplicaNotAvailable) { - return dupInt32Slice(metadata.Replicas), metadata.Err - } - return dupInt32Slice(metadata.Replicas), nil + return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 { + return metadata.Replicas + }) } func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { - if client.Closed() { - return nil, ErrClosedClient - } - - metadata := client.cachedMetadata(topic, partitionID) - - if metadata == nil { - err := client.RefreshMetadata(topic) - if err != nil { - return nil, err - } - metadata = client.cachedMetadata(topic, partitionID) - } - - if metadata == nil { - return nil, ErrUnknownTopicOrPartition - } - - if errors.Is(metadata.Err, ErrReplicaNotAvailable) { - return dupInt32Slice(metadata.Isr), metadata.Err - } - return dupInt32Slice(metadata.Isr), nil + return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 { + return metadata.Isr + }) } func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, error) { + return client.getReplicas(topic, partitionID, func(metadata *PartitionMetadata) []int32 { + return metadata.OfflineReplicas + }) +} + +func (client *client) getReplicas(topic string, partitionID int32, extractor func(metadata *PartitionMetadata) []int32) ([]int32, error) { if client.Closed() { return nil, ErrClosedClient } @@ -482,10 +435,11 @@ func (client *client) OfflineReplicas(topic string, partitionID int32) ([]int32, return nil, ErrUnknownTopicOrPartition } + replicas := extractor(metadata) if errors.Is(metadata.Err, ErrReplicaNotAvailable) { - return dupInt32Slice(metadata.OfflineReplicas), metadata.Err + return dupInt32Slice(replicas), metadata.Err } - return dupInt32Slice(metadata.OfflineReplicas), nil + return dupInt32Slice(replicas), nil } func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { diff --git a/vendor/github.com/IBM/sarama/config.go b/vendor/github.com/IBM/sarama/config.go index facf766..8c7c4c9 100644 --- a/vendor/github.com/IBM/sarama/config.go +++ b/vendor/github.com/IBM/sarama/config.go @@ -269,6 +269,13 @@ type Config struct { // more sophisticated backoff strategies. This takes precedence over // `Backoff` if set. BackoffFunc func(retries, maxRetries int) time.Duration + // The maximum length of the bridging buffer between `input` and `retries` channels + // in AsyncProducer#retryHandler. + // The limit is to prevent this buffer from overflowing or causing OOM. + // Defaults to 0 for unlimited. + // Any value between 0 and 4096 is pushed to 4096. + // A zero or negative value indicates unlimited. 
+ MaxBufferLength int } // Interceptors to be called when the producer dispatcher reads the @@ -387,7 +394,7 @@ type Config struct { // default is 250ms, since 0 causes the consumer to spin when no events are // available. 100-500ms is a reasonable range for most cases. Kafka only // supports precision up to milliseconds; nanoseconds will be truncated. - // Equivalent to the JVM's `fetch.wait.max.ms`. + // Equivalent to the JVM's `fetch.max.wait.ms`. MaxWaitTime time.Duration // The maximum amount of time the consumer expects a message takes to diff --git a/vendor/github.com/IBM/sarama/create_topics_request.go b/vendor/github.com/IBM/sarama/create_topics_request.go index 8382d17..e8c0f01 100644 --- a/vendor/github.com/IBM/sarama/create_topics_request.go +++ b/vendor/github.com/IBM/sarama/create_topics_request.go @@ -16,6 +16,21 @@ type CreateTopicsRequest struct { ValidateOnly bool } +func NewCreateTopicsRequest(version KafkaVersion, topicDetails map[string]*TopicDetail, timeout time.Duration) *CreateTopicsRequest { + r := &CreateTopicsRequest{ + TopicDetails: topicDetails, + Timeout: timeout, + } + if version.IsAtLeast(V2_0_0_0) { + r.Version = 3 + } else if version.IsAtLeast(V0_11_0_0) { + r.Version = 2 + } else if version.IsAtLeast(V0_10_2_0) { + r.Version = 1 + } + return r +} + func (c *CreateTopicsRequest) encode(pe packetEncoder) error { if err := pe.putArrayLength(len(c.TopicDetails)); err != nil { return err diff --git a/vendor/github.com/IBM/sarama/delete_topics_request.go b/vendor/github.com/IBM/sarama/delete_topics_request.go index 252c0d0..f38f327 100644 --- a/vendor/github.com/IBM/sarama/delete_topics_request.go +++ b/vendor/github.com/IBM/sarama/delete_topics_request.go @@ -8,6 +8,21 @@ type DeleteTopicsRequest struct { Timeout time.Duration } +func NewDeleteTopicsRequest(version KafkaVersion, topics []string, timeout time.Duration) *DeleteTopicsRequest { + d := &DeleteTopicsRequest{ + Topics: topics, + Timeout: timeout, + } + if version.IsAtLeast(V2_1_0_0) { + d.Version = 3 + } else if version.IsAtLeast(V2_0_0_0) { + d.Version = 2 + } else if version.IsAtLeast(V0_11_0_0) { + d.Version = 1 + } + return d +} + func (d *DeleteTopicsRequest) encode(pe packetEncoder) error { if err := pe.putStringArray(d.Topics); err != nil { return err diff --git a/vendor/github.com/IBM/sarama/docker-compose.yml b/vendor/github.com/IBM/sarama/docker-compose.yml index 55283cf..1e66cca 100644 --- a/vendor/github.com/IBM/sarama/docker-compose.yml +++ b/vendor/github.com/IBM/sarama/docker-compose.yml @@ -1,8 +1,7 @@ -version: '3.9' services: zookeeper-1: - hostname: 'zookeeper-1' - image: 'docker.io/library/zookeeper:3.6.3' + container_name: 'zookeeper-1' + image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always environment: @@ -14,8 +13,8 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-2: - hostname: 'zookeeper-2' - image: 'docker.io/library/zookeeper:3.6.3' + container_name: 'zookeeper-2' + image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always environment: @@ -27,8 +26,8 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' zookeeper-3: - hostname: 'zookeeper-3' - image: 'docker.io/library/zookeeper:3.6.3' + container_name: 'zookeeper-3' + image: 'docker.io/library/zookeeper:3.7.2' init: true restart: always environment: @@ -40,20 +39,20 @@ services: ZOO_MAX_CLIENT_CNXNS: '0' ZOO_4LW_COMMANDS_WHITELIST: 'mntr,conf,ruok' kafka-1: - hostname: 'kafka-1' - image: 
'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-1' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-1:9091', ] @@ -68,13 +67,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29091' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-1:9091,LISTENER_LOCAL://localhost:29091' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '1' KAFKA_CFG_BROKER_RACK: '1' @@ -86,20 +86,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-2: - hostname: 'kafka-2' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-2' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-2:9091', ] @@ -114,13 +114,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29092' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-2:9091,LISTENER_LOCAL://localhost:29092' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '2' KAFKA_CFG_BROKER_RACK: '2' @@ -132,20 +133,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-3: - hostname: 'kafka-3' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-3' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . 
dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-3:9091', ] @@ -160,13 +161,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29093' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-3:9091,LISTENER_LOCAL://localhost:29093' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '3' KAFKA_CFG_BROKER_RACK: '3' @@ -178,20 +180,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-4: - hostname: 'kafka-4' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-4' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-4:9091', ] @@ -206,13 +208,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29094' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-4:9091,LISTENER_LOCAL://localhost:29094' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '4' KAFKA_CFG_BROKER_RACK: '4' @@ -224,20 +227,20 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" kafka-5: - hostname: 'kafka-5' - image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.0}' + container_name: 'kafka-5' + image: 'sarama/fv-kafka-${KAFKA_VERSION:-3.6.2}' init: true build: context: . 
dockerfile: Dockerfile.kafka args: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} SCALA_VERSION: ${SCALA_VERSION:-2.13} healthcheck: test: [ 'CMD', - '/opt/kafka-${KAFKA_VERSION:-3.6.0}/bin/kafka-broker-api-versions.sh', + '/opt/kafka-${KAFKA_VERSION:-3.6.2}/bin/kafka-broker-api-versions.sh', '--bootstrap-server', 'kafka-5:9091', ] @@ -252,13 +255,14 @@ services: - toxiproxy restart: always environment: - KAFKA_VERSION: ${KAFKA_VERSION:-3.6.0} + KAFKA_VERSION: ${KAFKA_VERSION:-3.6.2} KAFKA_CFG_ZOOKEEPER_CONNECT: 'zookeeper-1:2181,zookeeper-2:2181,zookeeper-3:2181' KAFKA_CFG_LISTENERS: 'LISTENER_INTERNAL://:9091,LISTENER_LOCAL://:29095' KAFKA_CFG_ADVERTISED_LISTENERS: 'LISTENER_INTERNAL://kafka-5:9091,LISTENER_LOCAL://localhost:29095' KAFKA_CFG_INTER_BROKER_LISTENER_NAME: 'LISTENER_INTERNAL' KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP: 'LISTENER_INTERNAL:PLAINTEXT,LISTENER_LOCAL:PLAINTEXT' KAFKA_CFG_DEFAULT_REPLICATION_FACTOR: '2' + KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR: '2' KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: '2' KAFKA_CFG_BROKER_ID: '5' KAFKA_CFG_BROKER_RACK: '5' @@ -270,7 +274,7 @@ services: KAFKA_CFG_GROUP_INITIAL_REBALANCE_DELAY_MS: 0 KAFKA_JVM_PERFORMANCE_OPTS: "-XX:+IgnoreUnrecognizedVMOptions" toxiproxy: - hostname: 'toxiproxy' + container_name: 'toxiproxy' image: 'ghcr.io/shopify/toxiproxy:2.4.0' init: true healthcheck: diff --git a/vendor/github.com/IBM/sarama/elect_leaders_request.go b/vendor/github.com/IBM/sarama/elect_leaders_request.go new file mode 100644 index 0000000..cd8d6a7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/elect_leaders_request.go @@ -0,0 +1,134 @@ +package sarama + +type ElectLeadersRequest struct { + Version int16 + Type ElectionType + TopicPartitions map[string][]int32 + TimeoutMs int32 +} + +func (r *ElectLeadersRequest) encode(pe packetEncoder) error { + if r.Version > 0 { + pe.putInt8(int8(r.Type)) + } + + pe.putCompactArrayLength(len(r.TopicPartitions)) + + for topic, partitions := range r.TopicPartitions { + if r.Version < 2 { + if err := pe.putString(topic); err != nil { + return err + } + } else { + if err := pe.putCompactString(topic); err != nil { + return err + } + } + + if err := pe.putCompactInt32Array(partitions); err != nil { + return err + } + + if r.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } + } + + pe.putInt32(r.TimeoutMs) + + if r.Version >= 2 { + pe.putEmptyTaggedFieldArray() + } + + return nil +} + +func (r *ElectLeadersRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.Version > 0 { + t, err := pd.getInt8() + if err != nil { + return err + } + r.Type = ElectionType(t) + } + + topicCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + if topicCount > 0 { + r.TopicPartitions = make(map[string][]int32) + for i := 0; i < topicCount; i++ { + var topic string + if r.Version < 2 { + topic, err = pd.getString() + } else { + topic, err = pd.getCompactString() + } + if err != nil { + return err + } + partitionCount, err := pd.getCompactArrayLength() + if err != nil { + return err + } + partitions := make([]int32, partitionCount) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + partitions[j] = partition + } + r.TopicPartitions[topic] = partitions + if r.Version >= 2 { + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + } + } + + r.TimeoutMs, err = pd.getInt32() + if err != nil { + return err + } + + if r.Version >= 2 { + if _, 
err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + return nil +} + +func (r *ElectLeadersRequest) key() int16 { + return 43 +} + +func (r *ElectLeadersRequest) version() int16 { + return r.Version +} + +func (r *ElectLeadersRequest) headerVersion() int16 { + return 2 +} + +func (r *ElectLeadersRequest) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + +func (r *ElectLeadersRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V2_4_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_10_0_0 + default: + return V2_4_0_0 + } +} diff --git a/vendor/github.com/IBM/sarama/elect_leaders_response.go b/vendor/github.com/IBM/sarama/elect_leaders_response.go new file mode 100644 index 0000000..8c85249 --- /dev/null +++ b/vendor/github.com/IBM/sarama/elect_leaders_response.go @@ -0,0 +1,173 @@ +package sarama + +import "time" + +type PartitionResult struct { + ErrorCode KError + ErrorMessage *string +} + +func (b *PartitionResult) encode(pe packetEncoder, version int16) error { + pe.putInt16(int16(b.ErrorCode)) + if version < 2 { + if err := pe.putNullableString(b.ErrorMessage); err != nil { + return err + } + } else { + if err := pe.putNullableCompactString(b.ErrorMessage); err != nil { + return err + } + } + if version >= 2 { + pe.putEmptyTaggedFieldArray() + } + return nil +} + +func (b *PartitionResult) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + b.ErrorCode = KError(kerr) + if version < 2 { + b.ErrorMessage, err = pd.getNullableString() + } else { + b.ErrorMessage, err = pd.getCompactNullableString() + } + if version >= 2 { + _, err = pd.getEmptyTaggedFieldArray() + } + return err +} + +type ElectLeadersResponse struct { + Version int16 + ThrottleTimeMs int32 + ErrorCode KError + ReplicaElectionResults map[string]map[int32]*PartitionResult +} + +func (r *ElectLeadersResponse) encode(pe packetEncoder) error { + pe.putInt32(r.ThrottleTimeMs) + + if r.Version > 0 { + pe.putInt16(int16(r.ErrorCode)) + } + + pe.putCompactArrayLength(len(r.ReplicaElectionResults)) + for topic, partitions := range r.ReplicaElectionResults { + if r.Version < 2 { + if err := pe.putString(topic); err != nil { + return err + } + } else { + if err := pe.putCompactString(topic); err != nil { + return err + } + } + pe.putCompactArrayLength(len(partitions)) + for partition, result := range partitions { + pe.putInt32(partition) + if err := result.encode(pe, r.Version); err != nil { + return err + } + } + pe.putEmptyTaggedFieldArray() + } + + pe.putEmptyTaggedFieldArray() + + return nil +} + +func (r *ElectLeadersResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ThrottleTimeMs, err = pd.getInt32(); err != nil { + return err + } + if r.Version > 0 { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.ErrorCode = KError(kerr) + } + + numTopics, err := pd.getCompactArrayLength() + if err != nil { + return err + } + + r.ReplicaElectionResults = make(map[string]map[int32]*PartitionResult, numTopics) + for i := 0; i < numTopics; i++ { + var topic string + if r.Version < 2 { + topic, err = pd.getString() + } else { + topic, err = pd.getCompactString() + } + if err != nil { + return err + } + + numPartitions, err := pd.getCompactArrayLength() + if err != nil { + return err + } + r.ReplicaElectionResults[topic] = make(map[int32]*PartitionResult, numPartitions) + for j := 0; j < numPartitions; j++ { + partition, err := pd.getInt32() 
+ if err != nil { + return err + } + result := new(PartitionResult) + if err := result.decode(pd, r.Version); err != nil { + return err + } + r.ReplicaElectionResults[topic][partition] = result + } + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + } + + if _, err := pd.getEmptyTaggedFieldArray(); err != nil { + return err + } + + return nil +} + +func (r *ElectLeadersResponse) key() int16 { + return 43 +} + +func (r *ElectLeadersResponse) version() int16 { + return r.Version +} + +func (r *ElectLeadersResponse) headerVersion() int16 { + return 1 +} + +func (r *ElectLeadersResponse) isValidVersion() bool { + return r.Version >= 0 && r.Version <= 2 +} + +func (r *ElectLeadersResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 2: + return V2_4_0_0 + case 1: + return V0_11_0_0 + case 0: + return V0_10_0_0 + default: + return V2_4_0_0 + } +} + +func (r *ElectLeadersResponse) throttleTime() time.Duration { + return time.Duration(r.ThrottleTimeMs) * time.Millisecond +} diff --git a/vendor/github.com/IBM/sarama/election_type.go b/vendor/github.com/IBM/sarama/election_type.go new file mode 100644 index 0000000..01f3b65 --- /dev/null +++ b/vendor/github.com/IBM/sarama/election_type.go @@ -0,0 +1,10 @@ +package sarama + +type ElectionType int8 + +const ( + // PreferredElection constant type + PreferredElection ElectionType = 0 + // UncleanElection constant type + UncleanElection ElectionType = 1 +) diff --git a/vendor/github.com/IBM/sarama/entrypoint.sh b/vendor/github.com/IBM/sarama/entrypoint.sh index 9fe9a44..516a8dc 100644 --- a/vendor/github.com/IBM/sarama/entrypoint.sh +++ b/vendor/github.com/IBM/sarama/entrypoint.sh @@ -3,7 +3,7 @@ set -eu set -o pipefail -KAFKA_VERSION="${KAFKA_VERSION:-3.6.0}" +KAFKA_VERSION="${KAFKA_VERSION:-3.6.2}" KAFKA_HOME="/opt/kafka-${KAFKA_VERSION}" if [ ! 
-d "${KAFKA_HOME}" ]; then diff --git a/vendor/github.com/IBM/sarama/errors.go b/vendor/github.com/IBM/sarama/errors.go index 2c431ae..842d302 100644 --- a/vendor/github.com/IBM/sarama/errors.go +++ b/vendor/github.com/IBM/sarama/errors.go @@ -304,7 +304,7 @@ func (err KError) Error() string { case ErrOffsetsLoadInProgress: return "kafka server: The coordinator is still loading offsets and cannot currently process requests" case ErrConsumerCoordinatorNotAvailable: - return "kafka server: Offset's topic has not yet been created" + return "kafka server: The coordinator is not available" case ErrNotCoordinatorForConsumer: return "kafka server: Request was for a consumer group that is not coordinated by this broker" case ErrInvalidTopic: diff --git a/vendor/github.com/IBM/sarama/mockresponses.go b/vendor/github.com/IBM/sarama/mockresponses.go index d09415b..2c35279 100644 --- a/vendor/github.com/IBM/sarama/mockresponses.go +++ b/vendor/github.com/IBM/sarama/mockresponses.go @@ -778,6 +778,28 @@ func (mr *MockListPartitionReassignmentsResponse) For(reqBody versionedDecoder) return res } +type MockElectLeadersResponse struct { + t TestReporter +} + +func NewMockElectLeadersResponse(t TestReporter) *MockElectLeadersResponse { + return &MockElectLeadersResponse{t: t} +} + +func (mr *MockElectLeadersResponse) For(reqBody versionedDecoder) encoderWithHeader { + req := reqBody.(*ElectLeadersRequest) + res := &ElectLeadersResponse{Version: req.version(), ReplicaElectionResults: map[string]map[int32]*PartitionResult{}} + + for topic, partitions := range req.TopicPartitions { + for _, partition := range partitions { + res.ReplicaElectionResults[topic] = map[int32]*PartitionResult{ + partition: {ErrorCode: ErrNoError}, + } + } + } + return res +} + type MockDeleteRecordsResponse struct { t TestReporter } diff --git a/vendor/github.com/IBM/sarama/offset_manager.go b/vendor/github.com/IBM/sarama/offset_manager.go index 1bf5459..2948651 100644 --- a/vendor/github.com/IBM/sarama/offset_manager.go +++ b/vendor/github.com/IBM/sarama/offset_manager.go @@ -251,18 +251,31 @@ func (om *offsetManager) Commit() { } func (om *offsetManager) flushToBroker() { + broker, err := om.coordinator() + if err != nil { + om.handleError(err) + return + } + + // Care needs to be taken to unlock this. Don't want to defer the unlock as this would + // cause the lock to be held while waiting for the broker to reply. 
+ broker.lock.Lock() req := om.constructRequest() if req == nil { + broker.lock.Unlock() return } + resp, rp, err := sendOffsetCommit(broker, req) + broker.lock.Unlock() - broker, err := om.coordinator() if err != nil { om.handleError(err) + om.releaseCoordinator(broker) + _ = broker.Close() return } - resp, err := broker.CommitOffset(req) + err = handleResponsePromise(req, resp, rp, nil) if err != nil { om.handleError(err) om.releaseCoordinator(broker) @@ -270,9 +283,20 @@ func (om *offsetManager) flushToBroker() { return } + broker.handleThrottledResponse(resp) om.handleResponse(broker, req, resp) } +func sendOffsetCommit(coordinator *Broker, req *OffsetCommitRequest) (*OffsetCommitResponse, *responsePromise, error) { + resp := new(OffsetCommitResponse) + responseHeaderVersion := resp.headerVersion() + promise, err := coordinator.send(req, true, responseHeaderVersion) + if err != nil { + return nil, nil, err + } + return resp, promise, nil +} + func (om *offsetManager) constructRequest() *OffsetCommitRequest { r := &OffsetCommitRequest{ Version: 1, diff --git a/vendor/github.com/IBM/sarama/request.go b/vendor/github.com/IBM/sarama/request.go index e8e74ca..8f0c2b5 100644 --- a/vendor/github.com/IBM/sarama/request.go +++ b/vendor/github.com/IBM/sarama/request.go @@ -194,7 +194,8 @@ func allocateBody(key, version int16) protocolBody { // 41: DescribeDelegationTokenRequest case 42: return &DeleteGroupsRequest{Version: version} - // 43: ElectLeadersRequest + case 43: + return &ElectLeadersRequest{Version: version} case 44: return &IncrementalAlterConfigsRequest{Version: version} case 45: diff --git a/vendor/github.com/IBM/sarama/server.properties b/vendor/github.com/IBM/sarama/server.properties new file mode 100644 index 0000000..21ba1c7 --- /dev/null +++ b/vendor/github.com/IBM/sarama/server.properties @@ -0,0 +1,138 @@ +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# +# This configuration file is intended for use in ZK-based mode, where Apache ZooKeeper is required. +# See kafka.server.KafkaConfig for additional details and defaults +# + +############################# Server Basics ############################# + +# The id of the broker. This must be set to a unique integer for each broker. +broker.id=0 + +############################# Socket Server Settings ############################# + +# The address the socket server listens on. If not configured, the host name will be equal to the value of +# java.net.InetAddress.getCanonicalHostName(), with PLAINTEXT listener name, and port 9092. +# FORMAT: +# listeners = listener_name://host_name:port +# EXAMPLE: +# listeners = PLAINTEXT://your.host.name:9092 +#listeners=PLAINTEXT://:9092 + +# Listener name, hostname and port the broker will advertise to clients. +# If not set, it uses the value for "listeners". 
+#advertised.listeners=PLAINTEXT://your.host.name:9092 + +# Maps listener names to security protocols, the default is for them to be the same. See the config documentation for more details +#listener.security.protocol.map=PLAINTEXT:PLAINTEXT,SSL:SSL,SASL_PLAINTEXT:SASL_PLAINTEXT,SASL_SSL:SASL_SSL + +# The number of threads that the server uses for receiving requests from the network and sending responses to the network +num.network.threads=3 + +# The number of threads that the server uses for processing requests, which may include disk I/O +num.io.threads=8 + +# The send buffer (SO_SNDBUF) used by the socket server +socket.send.buffer.bytes=102400 + +# The receive buffer (SO_RCVBUF) used by the socket server +socket.receive.buffer.bytes=102400 + +# The maximum size of a request that the socket server will accept (protection against OOM) +socket.request.max.bytes=104857600 + + +############################# Log Basics ############################# + +# A comma separated list of directories under which to store log files +log.dirs=/tmp/kafka-logs + +# The default number of log partitions per topic. More partitions allow greater +# parallelism for consumption, but this will also result in more files across +# the brokers. +num.partitions=1 + +# The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +# This value is recommended to be increased for installations with data dirs located in RAID array. +num.recovery.threads.per.data.dir=1 + +############################# Internal Topic Settings ############################# +# The replication factor for the group metadata internal topics "__consumer_offsets" and "__transaction_state" +# For anything other than development testing, a value greater than 1 is recommended to ensure availability such as 3. +offsets.topic.replication.factor=1 +transaction.state.log.replication.factor=1 +transaction.state.log.min.isr=1 + +############################# Log Flush Policy ############################# + +# Messages are immediately written to the filesystem but by default we only fsync() to sync +# the OS cache lazily. The following configurations control the flush of data to disk. +# There are a few important trade-offs here: +# 1. Durability: Unflushed data may be lost if you are not using replication. +# 2. Latency: Very large flush intervals may lead to latency spikes when the flush does occur as there will be a lot of data to flush. +# 3. Throughput: The flush is generally the most expensive operation, and a small flush interval may lead to excessive seeks. +# The settings below allow one to configure the flush policy to flush data after a period of time or +# every N messages (or both). This can be done globally and overridden on a per-topic basis. + +# The number of messages to accept before forcing a flush of data to disk +#log.flush.interval.messages=10000 + +# The maximum amount of time a message can sit in a log before we force a flush +#log.flush.interval.ms=1000 + +############################# Log Retention Policy ############################# + +# The following configurations control the disposal of log segments. The policy can +# be set to delete segments after a period of time, or after a given size has accumulated. +# A segment will be deleted whenever *either* of these criteria are met. Deletion always happens +# from the end of the log. + +# The minimum age of a log file to be eligible for deletion due to age +log.retention.hours=168 + +# A size-based retention policy for logs. 
Segments are pruned from the log unless the remaining +# segments drop below log.retention.bytes. Functions independently of log.retention.hours. +#log.retention.bytes=1073741824 + +# The maximum size of a log segment file. When this size is reached a new log segment will be created. +#log.segment.bytes=1073741824 + +# The interval at which log segments are checked to see if they can be deleted according +# to the retention policies +log.retention.check.interval.ms=300000 + +############################# Zookeeper ############################# + +# Zookeeper connection string (see zookeeper docs for details). +# This is a comma separated host:port pairs, each corresponding to a zk +# server. e.g. "127.0.0.1:3000,127.0.0.1:3001,127.0.0.1:3002". +# You can also append an optional chroot string to the urls to specify the +# root directory for all kafka znodes. +zookeeper.connect=localhost:2181 + +# Timeout in ms for connecting to zookeeper +zookeeper.connection.timeout.ms=18000 + + +############################# Group Coordinator Settings ############################# + +# The following configuration specifies the time, in milliseconds, that the GroupCoordinator will delay the initial consumer rebalance. +# The rebalance will be further delayed by the value of group.initial.rebalance.delay.ms as new members join the group, up to a maximum of max.poll.interval.ms. +# The default value for this is 3 seconds. +# We override this to 0 here as it makes for a better out-of-the-box experience for development and testing. +# However, in production environments the default value of 3 seconds is more suitable as this will help to avoid unnecessary, and potentially expensive, rebalances during application startup. +group.initial.rebalance.delay.ms=0 diff --git a/vendor/github.com/IBM/sarama/transaction_manager.go b/vendor/github.com/IBM/sarama/transaction_manager.go index ca7e13d..bf20b75 100644 --- a/vendor/github.com/IBM/sarama/transaction_manager.go +++ b/vendor/github.com/IBM/sarama/transaction_manager.go @@ -466,7 +466,7 @@ func (t *transactionManager) publishOffsetsToTxn(offsets topicPartitionOffsets, resultOffsets = failedTxn if len(resultOffsets) == 0 { - DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s %+v\n", + DebugLogger.Printf("txnmgr/txn-offset-commit [%s] successful txn-offset-commit with group %s\n", t.transactionalID, groupId) return resultOffsets, false, nil } diff --git a/vendor/github.com/IBM/sarama/utils.go b/vendor/github.com/IBM/sarama/utils.go index feadc00..b0e1ace 100644 --- a/vendor/github.com/IBM/sarama/utils.go +++ b/vendor/github.com/IBM/sarama/utils.go @@ -44,11 +44,10 @@ func withRecover(fn func()) { } func safeAsyncClose(b *Broker) { - tmp := b // local var prevents clobbering in goroutine go withRecover(func() { - if connected, _ := tmp.Connected(); connected { - if err := tmp.Close(); err != nil { - Logger.Println("Error closing broker", tmp.ID(), ":", err) + if connected, _ := b.Connected(); connected { + if err := b.Close(); err != nil { + Logger.Println("Error closing broker", b.ID(), ":", err) } } }) @@ -198,7 +197,16 @@ var ( V3_4_1_0 = newKafkaVersion(3, 4, 1, 0) V3_5_0_0 = newKafkaVersion(3, 5, 0, 0) V3_5_1_0 = newKafkaVersion(3, 5, 1, 0) + V3_5_2_0 = newKafkaVersion(3, 5, 2, 0) V3_6_0_0 = newKafkaVersion(3, 6, 0, 0) + V3_6_1_0 = newKafkaVersion(3, 6, 1, 0) + V3_6_2_0 = newKafkaVersion(3, 6, 2, 0) + V3_7_0_0 = newKafkaVersion(3, 7, 0, 0) + V3_7_1_0 = newKafkaVersion(3, 7, 1, 0) + V3_8_0_0 = newKafkaVersion(3, 8, 0, 0) 
+ V3_8_1_0 = newKafkaVersion(3, 8, 1, 0) + V3_9_0_0 = newKafkaVersion(3, 9, 0, 0) + V4_0_0_0 = newKafkaVersion(4, 0, 0, 0) SupportedVersions = []KafkaVersion{ V0_8_2_0, @@ -237,8 +245,10 @@ var ( V2_6_0_0, V2_6_1_0, V2_6_2_0, + V2_6_3_0, V2_7_0_0, V2_7_1_0, + V2_7_2_0, V2_8_0_0, V2_8_1_0, V2_8_2_0, @@ -259,10 +269,19 @@ var ( V3_4_1_0, V3_5_0_0, V3_5_1_0, + V3_5_2_0, V3_6_0_0, + V3_6_1_0, + V3_6_2_0, + V3_7_0_0, + V3_7_1_0, + V3_8_0_0, + V3_8_1_0, + V3_9_0_0, + V4_0_0_0, } MinVersion = V0_8_2_0 - MaxVersion = V3_6_0_0 + MaxVersion = V4_0_0_0 DefaultVersion = V2_1_0_0 // reduced set of protocol versions to matrix test @@ -274,11 +293,11 @@ var ( V2_0_1_0, V2_2_2_0, V2_4_1_0, - V2_6_2_0, + V2_6_3_0, V2_8_2_0, V3_1_2_0, V3_3_2_0, - V3_6_0_0, + V3_6_2_0, } ) diff --git a/vendor/github.com/klauspost/compress/.goreleaser.yml b/vendor/github.com/klauspost/compress/.goreleaser.yml index a229538..4528059 100644 --- a/vendor/github.com/klauspost/compress/.goreleaser.yml +++ b/vendor/github.com/klauspost/compress/.goreleaser.yml @@ -1,5 +1,5 @@ -# This is an example goreleaser.yaml file with some sane defaults. -# Make sure to check the documentation at http://goreleaser.com +version: 2 + before: hooks: - ./gen.sh @@ -99,7 +99,7 @@ archives: checksum: name_template: 'checksums.txt' snapshot: - name_template: "{{ .Tag }}-next" + version_template: "{{ .Tag }}-next" changelog: sort: asc filters: diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index 05c7359..244ee19 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -14,8 +14,55 @@ This package provides various compression algorithms. [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) [![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) +# package usage + +Use `go get github.com/klauspost/compress@latest` to add it to your project. + +This package will support the current Go version and 2 versions back. + +* Use the `nounsafe` tag to disable all use of the "unsafe" package. +* Use the `noasm` tag to disable all assembly across packages. + +Use the links above for more information on each. 
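+To make the usage notes above concrete, here is a minimal, hedged sketch (not part of this diff). Because the packages are drop-in replacements, the API mirrors the standard library; building with `go build -tags=nounsafe,noasm` disables the unsafe and assembly paths described above.
+
+```go
+// A hedged sketch: exercising the drop-in gzip package. The API matches
+// compress/gzip; only the import path changes. Build with
+// `go build -tags=nounsafe,noasm` to disable the unsafe/assembly paths.
+package main
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+
+	"github.com/klauspost/compress/gzip" // drop-in for compress/gzip
+)
+
+func main() {
+	var buf bytes.Buffer
+
+	zw := gzip.NewWriter(&buf)
+	if _, err := zw.Write([]byte("hello, compression")); err != nil {
+		panic(err)
+	}
+	if err := zw.Close(); err != nil {
+		panic(err)
+	}
+
+	zr, err := gzip.NewReader(&buf)
+	if err != nil {
+		panic(err)
+	}
+	defer zr.Close()
+
+	out, err := io.ReadAll(zr)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(out)) // hello, compression
+}
+```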
+ # changelog +* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0) + * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036 + * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028 + * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043 + * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045 + * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048 + * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049 + * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050 + +* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11) + * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017 + * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014 + * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011 + * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013 + +* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) + * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 + * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 + * s2: Add EncodeBuffer buffer recycling callback https://github.com/klauspost/compress/pull/982 + * zstd: Improve memory usage on small streaming encodes https://github.com/klauspost/compress/pull/1007 + * flate: read data written with partial flush by @vajexal in https://github.com/klauspost/compress/pull/996 + +* Jun 12th, 2024 - [1.17.9](https://github.com/klauspost/compress/releases/tag/v1.17.9) + * s2: Reduce ReadFrom temporary allocations https://github.com/klauspost/compress/pull/949 + * flate, zstd: Shave some bytes off amd64 matchLen by @greatroar in https://github.com/klauspost/compress/pull/963 + * Upgrade zip/zlib to 1.22.4 upstream https://github.com/klauspost/compress/pull/970 https://github.com/klauspost/compress/pull/971 + * zstd: BuildDict fails with RLE table https://github.com/klauspost/compress/pull/951 + +* Apr 9th, 2024 - [1.17.8](https://github.com/klauspost/compress/releases/tag/v1.17.8) + * zstd: Reject blocks where reserved values are not 0 https://github.com/klauspost/compress/pull/885 + * zstd: Add RLE detection+encoding https://github.com/klauspost/compress/pull/938 + +* Feb 21st, 2024 - [1.17.7](https://github.com/klauspost/compress/releases/tag/v1.17.7) + * s2: Add AsyncFlush method: Complete the block without flushing by @Jille in https://github.com/klauspost/compress/pull/927 + * s2: Fix literal+repeat exceeds dst crash https://github.com/klauspost/compress/pull/930 + * Feb 5th, 2024 - [1.17.6](https://github.com/klauspost/compress/releases/tag/v1.17.6) * zstd: Fix incorrect repeat coding in best mode https://github.com/klauspost/compress/pull/923 * s2: Fix DecodeConcurrent deadlock on errors https://github.com/klauspost/compress/pull/925 @@ -44,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix rare *CORRUPTION* output in "best" mode. 
See https://github.com/klauspost/compress/pull/876 * Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) - * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 + * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871 * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 - * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 + * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 @@ -81,7 +128,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Various minor improvements by @greatroar in https://github.com/klauspost/compress/pull/788 https://github.com/klauspost/compress/pull/794 https://github.com/klauspost/compress/pull/795 * s2: Fix huge block overflow https://github.com/klauspost/compress/pull/779 * s2: Allow CustomEncoder fallback https://github.com/klauspost/compress/pull/780 - * gzhttp: Suppport ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 + * gzhttp: Support ResponseWriter Unwrap() in gzhttp handler by @jgimenez in https://github.com/klauspost/compress/pull/799 * Mar 13, 2023 - [v1.16.1](https://github.com/klauspost/compress/releases/tag/v1.16.1) * zstd: Speed up + improve best encoder by @greatroar in https://github.com/klauspost/compress/pull/776 @@ -103,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp See changes to v1.15.x * Jan 21st, 2023 (v1.15.15) - * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 + * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 @@ -136,7 +183,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649 * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651 * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656 - * zstd: Improve "better" compresssion https://github.com/klauspost/compress/pull/657 + * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657 * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658 * s2: Improve "better" compression. 
https://github.com/klauspost/compress/pull/635 * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646 @@ -146,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 - * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 + * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643 * July 13, 2022 (v1.15.8) @@ -188,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 - * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 + * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590 * May 11, 2022 (v1.15.4) @@ -215,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) * Mar 3, 2022 (v1.15.0) - * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) - * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) + * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498) + * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505) * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) - * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) - * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) - * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) + * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509) + * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400) + * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510) Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. @@ -237,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when * flate: Fix rare huffman only (-2) corruption. 
[#503](https://github.com/klauspost/compress/pull/503) * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 - * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) * Feb 17, 2022 (v1.14.3) * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) @@ -339,7 +386,7 @@ While the release has been extensively tested, it is recommended to testing when * s2: Fix binaries. * Feb 25, 2021 (v1.11.8) - * s2: Fixed occational out-of-bounds write on amd64. Upgrade recommended. + * s2: Fixed occasional out-of-bounds write on amd64. Upgrade recommended. * s2: Add AMD64 assembly for better mode. 25-50% faster. [#315](https://github.com/klauspost/compress/pull/315) * s2: Less upfront decoder allocation. [#322](https://github.com/klauspost/compress/pull/322) * zstd: Faster "compression" of incompressible data. [#314](https://github.com/klauspost/compress/pull/314) @@ -518,7 +565,7 @@ While the release has been extensively tested, it is recommended to testing when * Feb 19, 2016: Faster bit writer, level -2 is 15% faster, level 1 is 4% faster. * Feb 19, 2016: Handle small payloads faster in level 1-3. * Feb 19, 2016: Added faster level 2 + 3 compression modes. -* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progresssion in terms of compression. New default level is 5. +* Feb 19, 2016: [Rebalanced compression levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/), so there is a more even progression in terms of compression. New default level is 5. * Feb 14, 2016: Snappy: Merge upstream changes. * Feb 14, 2016: Snappy: Fix aggressive skipping. * Feb 14, 2016: Snappy: Update benchmark. @@ -544,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: -| old import | new import | Documentation -|--------------------|-----------------------------------------|--------------------| -| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) -| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) -| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) -| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) +Typical speed is about 2x of the standard library packages. 
+ +| old import | new import | Documentation | +|------------------|---------------------------------------|-------------------------------------------------------------------------| +| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | +| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) | +| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | +| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). @@ -604,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle. Compression is almost always worse than the fastest compression level and each write will allocate (a little) memory. -# Performance Update 2018 - -It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD. - -The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet. - -The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input. - -The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet). - - -## Overall differences. - -There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels. - -The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library. - -This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. 
The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression. - -There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab. - -## Web Content - -This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS. - -Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big. - -Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case. - -## Object files - -This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible. - -The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression. - -The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively. - -## Highly Compressible File - -This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc. - -It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression. - -So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground". - -## Medium-High Compressible - -This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams. - -We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both. - -## Medium Compressible - -I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario. - -The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior. - - -## Un-compressible Content - -This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections. 
- - -## Huffman only compression - -This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character. - -This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM). - -Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core. - -The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%). - -The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup. - -For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). - -This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip. # Other packages diff --git a/vendor/github.com/klauspost/compress/flate/deflate.go b/vendor/github.com/klauspost/compress/flate/deflate.go index 66d1657..af53fb8 100644 --- a/vendor/github.com/klauspost/compress/flate/deflate.go +++ b/vendor/github.com/klauspost/compress/flate/deflate.go @@ -861,7 +861,7 @@ func (d *compressor) reset(w io.Writer) { } switch d.compressionLevel.chain { case 0: - // level was NoCompression or ConstantCompresssion. + // level was NoCompression or ConstantCompression. d.windowEnd = 0 default: s := d.state diff --git a/vendor/github.com/klauspost/compress/flate/fast_encoder.go b/vendor/github.com/klauspost/compress/flate/fast_encoder.go index c8124b5..0e8b163 100644 --- a/vendor/github.com/klauspost/compress/flate/fast_encoder.go +++ b/vendor/github.com/klauspost/compress/flate/fast_encoder.go @@ -6,8 +6,10 @@ package flate import ( - "encoding/binary" "fmt" + "math/bits" + + "github.com/klauspost/compress/internal/le" ) type fastEnc interface { @@ -58,11 +60,11 @@ const ( ) func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[i:]) + return le.Load32(b, i) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[i:]) + return le.Load64(b, i) } type tableEntry struct { @@ -134,8 +136,8 @@ func hashLen(u uint64, length, mls uint8) uint32 { // matchlen will return the match length between offsets and t in src. // The maximum length returned is maxMatchLength - 4. // It is assumed that s > t, that t >=0 and s < len(src). 
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 { - if debugDecode { +func (e *fastGen) matchlen(s, t int, src []byte) int32 { + if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) } @@ -149,18 +151,34 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 { panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")")) } } - s1 := int(s) + maxMatchLength - 4 - if s1 > len(src) { - s1 = len(src) + s1 := min(s+maxMatchLength-4, len(src)) + left := s1 - s + n := int32(0) + for left >= 8 { + diff := le.Load64(src, s) ^ le.Load64(src, t) + if diff != 0 { + return n + int32(bits.TrailingZeros64(diff)>>3) + } + s += 8 + t += 8 + n += 8 + left -= 8 } - // Extend the match to be as long as possible. - return int32(matchLen(src[s:s1], src[t:])) + a := src[s:s1] + b := src[t:] + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n } // matchlenLong will return the match length between offsets and t in src. // It is assumed that s > t, that t >=0 and s < len(src). -func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { +func (e *fastGen) matchlenLong(s, t int, src []byte) int32 { if debugDeflate { if t >= s { panic(fmt.Sprint("t >=s:", t, s)) @@ -176,7 +194,28 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 { } } // Extend the match to be as long as possible. - return int32(matchLen(src[s:], src[t:])) + left := len(src) - s + n := int32(0) + for left >= 8 { + diff := le.Load64(src, s) ^ le.Load64(src, t) + if diff != 0 { + return n + int32(bits.TrailingZeros64(diff)>>3) + } + s += 8 + t += 8 + n += 8 + left -= 8 + } + + a := src[s:] + b := src[t:] + for i := range a { + if a[i] != b[i] { + break + } + n++ + } + return n } // Reset the encoding table. diff --git a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go index f70594c..afdc8c0 100644 --- a/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go +++ b/vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go @@ -5,10 +5,11 @@ package flate import ( - "encoding/binary" "fmt" "io" "math" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -438,7 +439,7 @@ func (w *huffmanBitWriter) writeOutBits() { n := w.nbytes // We over-write, but faster... 
- binary.LittleEndian.PutUint64(w.bytes[n:], bits) + le.Store64(w.bytes[n:], bits) n += 6 if n >= bufferFlushSize { @@ -854,7 +855,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -882,7 +883,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -905,7 +906,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64(extraLength) << (nbits & 63) nbits += extraLengthBits if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -931,7 +932,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= c.code64() << (nbits & 63) nbits += c.len() if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -953,7 +954,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode) bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63) nbits += uint8(offsetComb) if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 @@ -1107,7 +1108,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // We must have at least 48 bits free. if nbits >= 8 { n := nbits >> 3 - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) bits >>= (n * 8) & 63 nbits -= n * 8 nbytes += n @@ -1136,7 +1137,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) { // Remaining... for _, t := range input { if nbits >= 48 { - binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits) + le.Store64(w.bytes[nbytes:], bits) //*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits bits >>= 48 nbits -= 48 diff --git a/vendor/github.com/klauspost/compress/flate/inflate.go b/vendor/github.com/klauspost/compress/flate/inflate.go index 2f410d6..0d7b437 100644 --- a/vendor/github.com/klauspost/compress/flate/inflate.go +++ b/vendor/github.com/klauspost/compress/flate/inflate.go @@ -298,6 +298,14 @@ const ( huffmanGenericReader ) +// flushMode tells decompressor when to return data +type flushMode uint8 + +const ( + syncFlush flushMode = iota // return data after sync flush block + partialFlush // return data after each block +) + // Decompress state. type decompressor struct { // Input source. 
@@ -332,6 +340,8 @@ type decompressor struct { nb uint final bool + + flushMode flushMode } func (f *decompressor) nextBlock() { @@ -618,7 +628,10 @@ func (f *decompressor) dataBlock() { } if n == 0 { - f.toRead = f.dict.readFlush() + if f.flushMode == syncFlush { + f.toRead = f.dict.readFlush() + } + f.finishBlock() return } @@ -657,8 +670,12 @@ func (f *decompressor) finishBlock() { if f.dict.availRead() > 0 { f.toRead = f.dict.readFlush() } + f.err = io.EOF + } else if f.flushMode == partialFlush && f.dict.availRead() > 0 { + f.toRead = f.dict.readFlush() } + f.step = nextBlock } @@ -789,15 +806,25 @@ func (f *decompressor) Reset(r io.Reader, dict []byte) error { return nil } -// NewReader returns a new ReadCloser that can be used -// to read the uncompressed version of r. -// If r does not also implement io.ByteReader, -// the decompressor may read more data than necessary from r. -// It is the caller's responsibility to call Close on the ReadCloser -// when finished reading. -// -// The ReadCloser returned by NewReader also implements Resetter. -func NewReader(r io.Reader) io.ReadCloser { +type ReaderOpt func(*decompressor) + +// WithPartialBlock tells decompressor to return after each block, +// so it can read data written with partial flush +func WithPartialBlock() ReaderOpt { + return func(f *decompressor) { + f.flushMode = partialFlush + } +} + +// WithDict initializes the reader with a preset dictionary +func WithDict(dict []byte) ReaderOpt { + return func(f *decompressor) { + f.dict.init(maxMatchOffset, dict) + } +} + +// NewReaderOpts returns new reader with provided options +func NewReaderOpts(r io.Reader, opts ...ReaderOpt) io.ReadCloser { fixedHuffmanDecoderInit() var f decompressor @@ -806,9 +833,26 @@ func NewReader(r io.Reader) io.ReadCloser { f.codebits = new([numCodes]int) f.step = nextBlock f.dict.init(maxMatchOffset, nil) + + for _, opt := range opts { + opt(&f) + } + return &f } +// NewReader returns a new ReadCloser that can be used +// to read the uncompressed version of r. +// If r does not also implement io.ByteReader, +// the decompressor may read more data than necessary from r. +// It is the caller's responsibility to call Close on the ReadCloser +// when finished reading. +// +// The ReadCloser returned by NewReader also implements Resetter. +func NewReader(r io.Reader) io.ReadCloser { + return NewReaderOpts(r) +} + // NewReaderDict is like NewReader but initializes the reader // with a preset dictionary. The returned Reader behaves as if // the uncompressed data stream started with the given dictionary, @@ -817,13 +861,5 @@ func NewReader(r io.Reader) io.ReadCloser { // // The ReadCloser returned by NewReader also implements Resetter. 
func NewReaderDict(r io.Reader, dict []byte) io.ReadCloser { - fixedHuffmanDecoderInit() - - var f decompressor - f.r = makeReader(r) - f.bits = new([maxNumLit + maxNumDist]int) - f.codebits = new([numCodes]int) - f.step = nextBlock - f.dict.init(maxMatchOffset, dict) - return &f + return NewReaderOpts(r, WithDict(dict)) } diff --git a/vendor/github.com/klauspost/compress/flate/level1.go b/vendor/github.com/klauspost/compress/flate/level1.go index 703b9a8..c3581a3 100644 --- a/vendor/github.com/klauspost/compress/flate/level1.go +++ b/vendor/github.com/klauspost/compress/flate/level1.go @@ -1,9 +1,9 @@ package flate import ( - "encoding/binary" "fmt" - "math/bits" + + "github.com/klauspost/compress/internal/le" ) // fastGen maintains the table for matches, @@ -77,6 +77,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { nextS := s var candidate tableEntry + var t int32 for { nextHash := hashLen(cv, tableBits, hashBytes) candidate = e.table[nextHash] @@ -88,9 +89,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { now := load6432(src, nextS) e.table[nextHash] = tableEntry{offset: s + e.cur} nextHash = hashLen(now, tableBits, hashBytes) - - offset := s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -103,8 +103,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { now >>= 8 e.table[nextHash] = tableEntry{offset: s + e.cur} - offset = s - (candidate.offset - e.cur) - if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { e.table[nextHash] = tableEntry{offset: nextS + e.cur} break } @@ -120,36 +120,10 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { // literal bytes prior to s. // Extend the 4-byte match as long as possible. - t := candidate.offset - e.cur - var l = int32(4) - if false { - l = e.matchlenLong(s+4, t+4, src) + 4 - } else { - // inlined: - a := src[s+4:] - b := src[t+4:] - for len(a) >= 8 { - if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 { - l += int32(bits.TrailingZeros64(diff) >> 3) - break - } - l += 8 - a = a[8:] - b = b[8:] - } - if len(a) < 8 { - b = b[:len(a)] - for i := range a { - if a[i] != b[i] { - break - } - l++ - } - } - } + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards - for t > 0 && s > nextEmit && src[t-1] == src[s-1] { + for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) { s-- t-- l++ @@ -221,8 +195,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) { candidate = e.table[currHash] e.table[currHash] = tableEntry{offset: o + 2} - offset := s - (candidate.offset - e.cur) - if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) { + t = candidate.offset - e.cur + if s-t > maxMatchOffset || uint32(x) != load3232(src, t) { cv = x >> 8 s++ break diff --git a/vendor/github.com/klauspost/compress/flate/level2.go b/vendor/github.com/klauspost/compress/flate/level2.go index 876dfbe..c8d047f 100644 --- a/vendor/github.com/klauspost/compress/flate/level2.go +++ b/vendor/github.com/klauspost/compress/flate/level2.go @@ -126,7 +126,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. 
t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/vendor/github.com/klauspost/compress/flate/level3.go b/vendor/github.com/klauspost/compress/flate/level3.go index 7aa2b72..33f9fb1 100644 --- a/vendor/github.com/klauspost/compress/flate/level3.go +++ b/vendor/github.com/klauspost/compress/flate/level3.go @@ -135,7 +135,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. // t := candidate.offset - e.cur - l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/vendor/github.com/klauspost/compress/flate/level4.go b/vendor/github.com/klauspost/compress/flate/level4.go index 23c08b3..88509e1 100644 --- a/vendor/github.com/klauspost/compress/flate/level4.go +++ b/vendor/github.com/klauspost/compress/flate/level4.go @@ -98,19 +98,19 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { e.bTable[nextHashL] = entry t = lCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // We got a long match. Use that. break } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... lCandidate = e.bTable[hash7(next, tableBits)] // If the next long is a candidate, check if we should use that instead... - lOff := nextS - (lCandidate.offset - e.cur) - if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) { + lOff := lCandidate.offset - e.cur + if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) { l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:]) if l2 > l1 { s = nextS @@ -127,7 +127,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) { // them as literal bytes. // Extend the 4-byte match as long as possible. 
- l := e.matchlenLong(s+4, t+4, src) + 4 + l := e.matchlenLong(int(s+4), int(t+4), src) + 4 // Extend backwards for t > 0 && s > nextEmit && src[t-1] == src[s-1] { diff --git a/vendor/github.com/klauspost/compress/flate/level5.go b/vendor/github.com/klauspost/compress/flate/level5.go index 1f61ec1..6e5c215 100644 --- a/vendor/github.com/klauspost/compress/flate/level5.go +++ b/vendor/github.com/klauspost/compress/flate/level5.go @@ -111,16 +111,16 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { + l = e.matchlen(int(s+4), int(t+4), src) + 4 + ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4 if ml1 > l { t = t2 l = ml1 @@ -130,7 +130,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -140,9 +140,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 + l = e.matchlen(int(s+4), int(t+4), src) + 4 lCandidate = e.bTable[nextHashL] // Store the next match @@ -153,8 +153,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -164,8 +164,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -185,9 +185,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { if l == 0 { // Extend the 4-byte match as long as possible. - l = e.matchlenLong(s+4, t+4, src) + 4 + l = e.matchlenLong(int(s+4), int(t+4), src) + 4 } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) + l += e.matchlenLong(int(s+l), int(t+l), src) } // Try to locate a better match by checking the end of best match... 
@@ -203,7 +203,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) { s2 := s + skipBeginning off := s2 - t2 if t2 >= 0 && off < maxMatchOffset && off > 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 @@ -423,14 +423,14 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { l = e.matchlen(s+4, t+4, src) + 4 ml1 := e.matchlen(s+4, t2+4, src) + 4 if ml1 > l { @@ -442,7 +442,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { break } t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -452,7 +452,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... l = e.matchlen(s+4, t+4, src) + 4 lCandidate = e.bTable[nextHashL] @@ -465,7 +465,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 := lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { + if load3232(src, t2) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 @@ -476,7 +476,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { ml := e.matchlen(nextS+4, t2+4, src) + 4 if ml > l { t = t2 diff --git a/vendor/github.com/klauspost/compress/flate/level6.go b/vendor/github.com/klauspost/compress/flate/level6.go index f1e9d98..96f5bb4 100644 --- a/vendor/github.com/klauspost/compress/flate/level6.go +++ b/vendor/github.com/klauspost/compress/flate/level6.go @@ -113,7 +113,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t = lCandidate.Cur.offset - e.cur if s-t < maxMatchOffset { - if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) { + if uint32(cv) == load3232(src, t) { // Long candidate matches at least 4 bytes. // Store the next match @@ -123,9 +123,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Check the previous long candidate as well. 
t2 := lCandidate.Prev.offset - e.cur - if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { - l = e.matchlen(s+4, t+4, src) + 4 - ml1 := e.matchlen(s+4, t2+4, src) + 4 + if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) { + l = e.matchlen(int(s+4), int(t+4), src) + 4 + ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4 if ml1 > l { t = t2 l = ml1 @@ -136,7 +136,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // Current value did not match, but check if previous long value does. t = lCandidate.Prev.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Store the next match e.table[nextHashS] = tableEntry{offset: nextS + e.cur} eLong := &e.bTable[nextHashL] @@ -146,9 +146,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } t = sCandidate.offset - e.cur - if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) { + if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) { // Found a 4 match... - l = e.matchlen(s+4, t+4, src) + 4 + l = e.matchlen(int(s+4), int(t+4), src) + 4 // Look up next long candidate (at nextS) lCandidate = e.bTable[nextHashL] @@ -162,7 +162,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { const repOff = 1 t2 := s - repeat + repOff if load3232(src, t2) == uint32(cv>>(8*repOff)) { - ml := e.matchlen(s+4+repOff, t2+4, src) + 4 + ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4 if ml > l { t = t2 l = ml @@ -175,8 +175,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // If the next long is a candidate, use that... t2 = lCandidate.Cur.offset - e.cur if nextS-t2 < maxMatchOffset { - if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -186,8 +186,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { } // If the previous long is a candidate, use that... t2 = lCandidate.Prev.offset - e.cur - if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) { - ml := e.matchlen(nextS+4, t2+4, src) + 4 + if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) { + ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4 if ml > l { t = t2 s = nextS @@ -207,9 +207,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { // Extend the 4-byte match as long as possible. if l == 0 { - l = e.matchlenLong(s+4, t+4, src) + 4 + l = e.matchlenLong(int(s+4), int(t+4), src) + 4 } else if l == maxMatchLength { - l += e.matchlenLong(s+l, t+l, src) + l += e.matchlenLong(int(s+l), int(t+l), src) } // Try to locate a better match by checking the end-of-match... 
@@ -227,7 +227,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { off := s2 - t2 if off < maxMatchOffset { if off > 0 && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 @@ -237,7 +237,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) { t2 = eLong.Prev.offset - e.cur - l + skipBeginning off := s2 - t2 if off > 0 && off < maxMatchOffset && t2 >= 0 { - if l2 := e.matchlenLong(s2, t2, src); l2 > l { + if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l { t = t2 l = l2 s = s2 diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go deleted file mode 100644 index 4bd3885..0000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.go +++ /dev/null @@ -1,16 +0,0 @@ -//go:build amd64 && !appengine && !noasm && gc -// +build amd64,!appengine,!noasm,gc - -// Copyright 2019+ Klaus Post. All rights reserved. -// License information can be found in the LICENSE file. - -package flate - -// matchLen returns how many bytes match in a and b -// -// It assumes that: -// -// len(a) <= len(b) and len(a) > 0 -// -//go:noescape -func matchLen(a []byte, b []byte) int diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s b/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s deleted file mode 100644 index 0782b86..0000000 --- a/vendor/github.com/klauspost/compress/flate/matchlen_amd64.s +++ /dev/null @@ -1,66 +0,0 @@ -// Copied from S2 implementation. - -//go:build !appengine && !noasm && gc && !noasm - -#include "textflag.h" - -// func matchLen(a []byte, b []byte) int -TEXT ·matchLen(SB), NOSPLIT, $0-56 - MOVQ a_base+0(FP), AX - MOVQ b_base+24(FP), CX - MOVQ a_len+8(FP), DX - - // matchLen - XORL SI, SI - CMPL DX, $0x08 - JB matchlen_match4_standalone - -matchlen_loopback_standalone: - MOVQ (AX)(SI*1), BX - XORQ (CX)(SI*1), BX - JZ matchlen_loop_standalone - -#ifdef GOAMD64_v3 - TZCNTQ BX, BX -#else - BSFQ BX, BX -#endif - SHRL $0x03, BX - LEAL (SI)(BX*1), SI - JMP gen_match_len_end - -matchlen_loop_standalone: - LEAL -8(DX), DX - LEAL 8(SI), SI - CMPL DX, $0x08 - JAE matchlen_loopback_standalone - -matchlen_match4_standalone: - CMPL DX, $0x04 - JB matchlen_match2_standalone - MOVL (AX)(SI*1), BX - CMPL (CX)(SI*1), BX - JNE matchlen_match2_standalone - LEAL -4(DX), DX - LEAL 4(SI), SI - -matchlen_match2_standalone: - CMPL DX, $0x02 - JB matchlen_match1_standalone - MOVW (AX)(SI*1), BX - CMPW (CX)(SI*1), BX - JNE matchlen_match1_standalone - LEAL -2(DX), DX - LEAL 2(SI), SI - -matchlen_match1_standalone: - CMPL DX, $0x01 - JB gen_match_len_end - MOVB (AX)(SI*1), BL - CMPB (CX)(SI*1), BL - JNE gen_match_len_end - INCL SI - -gen_match_len_end: - MOVQ SI, ret+48(FP) - RET diff --git a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go index ad5cd81..6149384 100644 --- a/vendor/github.com/klauspost/compress/flate/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/flate/matchlen_generic.go @@ -1,27 +1,29 @@ -//go:build !amd64 || appengine || !gc || noasm -// +build !amd64 appengine !gc noasm - // Copyright 2019+ Klaus Post. All rights reserved. // License information can be found in the LICENSE file. package flate import ( - "encoding/binary" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. 
func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) if diff != 0 { return n + bits.TrailingZeros64(diff)>>3 } n += 8 + left -= 8 } + a = a[n:] + b = b[n:] for i := range a { if a[i] != b[i] { break @@ -29,5 +31,4 @@ func matchLen(a, b []byte) (n int) { n++ } return n - } diff --git a/vendor/github.com/klauspost/compress/flate/stateless.go b/vendor/github.com/klauspost/compress/flate/stateless.go index f3d4139..13b9b10 100644 --- a/vendor/github.com/klauspost/compress/flate/stateless.go +++ b/vendor/github.com/klauspost/compress/flate/stateless.go @@ -4,6 +4,8 @@ import ( "io" "math" "sync" + + "github.com/klauspost/compress/internal/le" ) const ( @@ -152,18 +154,11 @@ func hashSL(u uint32) uint32 { } func load3216(b []byte, i int16) uint32 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:4] - return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return le.Load32(b, i) } func load6416(b []byte, i int16) uint64 { - // Help the compiler eliminate bounds checks on the read so it can be done in a single read. - b = b[i:] - b = b[:8] - return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | - uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return le.Load64(b, i) } func statelessEnc(dst *tokens, src []byte, startAt int16) { diff --git a/vendor/github.com/klauspost/compress/fse/decompress.go b/vendor/github.com/klauspost/compress/fse/decompress.go index cc05d0f..0c7dd4f 100644 --- a/vendor/github.com/klauspost/compress/fse/decompress.go +++ b/vendor/github.com/klauspost/compress/fse/decompress.go @@ -15,7 +15,7 @@ const ( // It is possible, but by no way guaranteed that corrupt data will // return an error. // It is up to the caller to verify integrity of the returned data. -// Use a predefined Scrach to set maximum acceptable output size. +// Use a predefined Scratch to set maximum acceptable output size. func Decompress(b []byte, s *Scratch) ([]byte, error) { s, err := s.prepare(b) if err != nil { diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index e36d974..bfc7a52 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -6,10 +6,11 @@ package huff0 import ( - "encoding/binary" "errors" "fmt" "io" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error { return nil } -// peekBitsFast requires that at least one bit is requested every time. +// peekByteFast requires that at least one byte is requested every time. // There are no checks if the buffer is filled. func (b *bitReaderBytes) peekByteFast() uint8 { got := uint8(b.value >> 56) @@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() { } // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() { // fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. 
func (b *bitReaderBytes) fillFastStart() { // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() { if b.bitsRead < 32 { return } - if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + if b.off >= 4 { + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << (b.bitsRead - 32) b.bitsRead -= 32 b.off -= 4 @@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() { return } - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 @@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() { // fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. func (b *bitReaderShifted) fillFastStart() { - // Do single re-slice to avoid bounds checks. - b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) + b.value = le.Load64(b.in, b.off-8) b.bitsRead = 0 b.off -= 8 } @@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() { return } if b.off > 4 { - v := b.in[b.off-4 : b.off] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) + low := le.Load32(b.in, b.off-4) b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.bitsRead -= 32 b.off -= 4 diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 54bd08b..0f56b02 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -1136,7 +1136,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 0 { - fmt.Fprintf(w, "%d errros in base, stopping\n", errs) + fmt.Fprintf(w, "%d errors in base, stopping\n", errs) continue } // Ensure that all combinations are covered. @@ -1152,7 +1152,7 @@ func (s *Scratch) matches(ct cTable, w io.Writer) { errs++ } if errs > 20 { - fmt.Fprintf(w, "%d errros, stopping\n", errs) + fmt.Fprintf(w, "%d errors, stopping\n", errs) break } } diff --git a/vendor/github.com/klauspost/compress/internal/le/le.go b/vendor/github.com/klauspost/compress/internal/le/le.go new file mode 100644 index 0000000..e54909e --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/le.go @@ -0,0 +1,5 @@ +package le + +type Indexer interface { + int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64 +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go new file mode 100644 index 0000000..0cfb5c0 --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go @@ -0,0 +1,42 @@ +//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine + +package le + +import ( + "encoding/binary" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + return b[i] +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + return binary.LittleEndian.Uint16(b[i:]) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + return binary.LittleEndian.Uint32(b[i:]) +} + +// Load64 will load from b at index i. 
+func Load64[I Indexer](b []byte, i I) uint64 { + return binary.LittleEndian.Uint64(b[i:]) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + binary.LittleEndian.PutUint16(b, v) +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + binary.LittleEndian.PutUint32(b, v) +} + +// Store64 will store v at b. +func Store64(b []byte, v uint64) { + binary.LittleEndian.PutUint64(b, v) +} diff --git a/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go new file mode 100644 index 0000000..ada45cd --- /dev/null +++ b/vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go @@ -0,0 +1,55 @@ +// We enable 64 bit LE platforms: + +//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine + +package le + +import ( + "unsafe" +) + +// Load8 will load from b at index i. +func Load8[I Indexer](b []byte, i I) byte { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load16 will load from b at index i. +func Load16[I Indexer](b []byte, i I) uint16 { + //return binary.LittleEndian.Uint16(b[i:]) + //return *(*uint16)(unsafe.Pointer(&b[i])) + return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load32 will load from b at index i. +func Load32[I Indexer](b []byte, i I) uint32 { + //return binary.LittleEndian.Uint32(b[i:]) + //return *(*uint32)(unsafe.Pointer(&b[i])) + return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Load64 will load from b at index i. +func Load64[I Indexer](b []byte, i I) uint64 { + //return binary.LittleEndian.Uint64(b[i:]) + //return *(*uint64)(unsafe.Pointer(&b[i])) + return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i)) +} + +// Store16 will store v at b. +func Store16(b []byte, v uint16) { + //binary.LittleEndian.PutUint16(b, v) + *(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store32 will store v at b. +func Store32(b []byte, v uint32) { + //binary.LittleEndian.PutUint32(b, v) + *(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v +} + +// Store64 will store v at b. +func Store64(b []byte, v uint64) { + //binary.LittleEndian.PutUint64(b, v) + *(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v +} diff --git a/vendor/github.com/klauspost/compress/s2sx.mod b/vendor/github.com/klauspost/compress/s2sx.mod index 5a4412f..81bda5e 100644 --- a/vendor/github.com/klauspost/compress/s2sx.mod +++ b/vendor/github.com/klauspost/compress/s2sx.mod @@ -1,4 +1,3 @@ module github.com/klauspost/compress -go 1.19 - +go 1.22 diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index 92e2347..c11d7fa 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. -This package is pure Go and without use of "unsafe". +This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features. The `zstd` package is provided as open source software using a Go standard license. 
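The new internal/le package above is the heart of this compress update: on the 64-bit little-endian platforms named in its build tags it loads words with a single unsafe pointer read, while `-tags nounsafe` (or `purego`/`appengine`) selects the encoding/binary fallback shown first. Its main consumer is the rewritten matchLen that both flate and zstd now share: XOR eight bytes at a time, and when the words differ, count trailing zero bits to locate the first mismatching byte. A self-contained sketch of that technique, substituting encoding/binary for the internal le helpers so it runs standalone:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// matchLen returns the length of the common prefix of a and b.
// a must be the shorter (or equal-length) input, matching the
// contract of the vendored function.
func matchLen(a, b []byte) (n int) {
	left := len(a)
	for left >= 8 {
		// Identical bytes XOR to zero, so the lowest set bit of diff
		// sits inside the first mismatching byte (little-endian view).
		diff := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
		left -= 8
	}
	// Fewer than 8 bytes remain: finish byte by byte.
	a, b = a[n:], b[n:]
	for i := range a {
		if a[i] != b[i] {
			break
		}
		n++
	}
	return n
}

func main() {
	fmt.Println(matchLen([]byte("zstandardzstd!"), []byte("zstandardzstd?"))) // 13
}

With the unsafe le variant, the slice indexing and its bounds checks disappear entirely on the fast path, which is the point of routing all of these loads through one tiny package.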
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go index 25ca983..d41e3e1 100644 --- a/vendor/github.com/klauspost/compress/zstd/bitreader.go +++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go @@ -5,11 +5,12 @@ package zstd import ( - "encoding/binary" "errors" "fmt" "io" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // bitReader reads a bitstream in reverse. @@ -18,6 +19,7 @@ import ( type bitReader struct { in []byte value uint64 // Maybe use [16]byte, but shifting is awkward. + cursor int // offset where next read should end bitsRead uint8 } @@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error { if v == 0 { return errors.New("corrupt stream, did not find end of stream") } + b.cursor = len(in) b.bitsRead = 64 b.value = 0 if len(in) >= 8 { @@ -67,18 +70,15 @@ func (b *bitReader) fillFast() { if b.bitsRead < 32 { return } - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 } // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. func (b *bitReader) fillFastStart() { - v := b.in[len(b.in)-8:] - b.in = b.in[:len(b.in)-8] - b.value = binary.LittleEndian.Uint64(v) + b.cursor -= 8 + b.value = le.Load64(b.in, b.cursor) b.bitsRead = 0 } @@ -87,25 +87,23 @@ func (b *bitReader) fill() { if b.bitsRead < 32 { return } - if len(b.in) >= 4 { - v := b.in[len(b.in)-4:] - b.in = b.in[:len(b.in)-4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) + if b.cursor >= 4 { + b.cursor -= 4 + b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor)) b.bitsRead -= 32 return } - b.bitsRead -= uint8(8 * len(b.in)) - for len(b.in) > 0 { - b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) - b.in = b.in[:len(b.in)-1] + b.bitsRead -= uint8(8 * b.cursor) + for b.cursor > 0 { + b.cursor -= 1 + b.value = (b.value << 8) | uint64(b.in[b.cursor]) } } // finished returns true if all bits have been read from the bit stream. func (b *bitReader) finished() bool { - return len(b.in) == 0 && b.bitsRead >= 64 + return b.cursor == 0 && b.bitsRead >= 64 } // overread returns true if more bits have been requested than is on the stream. @@ -115,13 +113,14 @@ func (b *bitReader) overread() bool { // remain returns the number of bits remaining. func (b *bitReader) remain() uint { - return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) + return 8*uint(b.cursor) + 64 - uint(b.bitsRead) } // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReader) close() error { // Release reference. 
b.in = nil + b.cursor = 0 if !b.finished() { return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) } diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go index 03744fb..0dd742f 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockdec.go +++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go @@ -5,14 +5,10 @@ package zstd import ( - "bytes" - "encoding/binary" "errors" "fmt" "hash/crc32" "io" - "os" - "path/filepath" "sync" "github.com/klauspost/compress/huff0" @@ -598,7 +594,9 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { printf("RLE set to 0x%x, code: %v", symb, v) } case compModeFSE: - println("Reading table for", tableIndex(i)) + if debugDecoder { + println("Reading table for", tableIndex(i)) + } if seq.fse == nil || seq.fse.preDefined { seq.fse = fseDecoderPool.Get().(*fseDecoder) } @@ -646,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { println("initializing sequences:", err) return err } - // Extract blocks... - if false && hist.dict == nil { - fatalErr := func(err error) { - if err != nil { - panic(err) - } - } - fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize) - var buf bytes.Buffer - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse)) - fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse)) - buf.Write(in) - os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm) - } return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/blockenc.go b/vendor/github.com/klauspost/compress/zstd/blockenc.go index 32a7f40..fd35ea1 100644 --- a/vendor/github.com/klauspost/compress/zstd/blockenc.go +++ b/vendor/github.com/klauspost/compress/zstd/blockenc.go @@ -9,6 +9,7 @@ import ( "fmt" "math" "math/bits" + "slices" "github.com/klauspost/compress/huff0" ) @@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int { // All 0 return 0 } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } - cnt := maxCount(hist[:maxSym]) + cnt := int(slices.Max(hist[:maxSym])) if cnt == len(data) { // RLE return 0 @@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() { } } } - maxCount := func(a []uint32) int { - var max uint32 - for _, v := range a { - if v > max { - max = v - } - } - return int(max) - } if debugAsserts && mlMax > maxMatchLengthSymbol { panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) } @@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() { panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) } - b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) - b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) - b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) + b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1]))) + b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1]))) + b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1]))) } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index bbca172..ea2a193 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ 
b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { } // Read bytes from the decompressed stream into p. -// Returns the number of bytes written and any error that occurred. +// Returns the number of bytes read and any error that occurred. // When the stream is done, io.EOF will be returned. func (d *Decoder) Read(p []byte) (int, error) { var n int @@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { frame.bBuf = nil if frame.history.decoders.br != nil { frame.history.decoders.br.in = nil + frame.history.decoders.br.cursor = 0 } d.decoders <- block }() diff --git a/vendor/github.com/klauspost/compress/zstd/enc_base.go b/vendor/github.com/klauspost/compress/zstd/enc_base.go index 5ca4603..7d250c6 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_base.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_base.go @@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 { panic(err) } if t < 0 { - err := fmt.Sprintf("s (%d) < 0", s) + err := fmt.Sprintf("t (%d) < 0", t) panic(err) } if s-t > e.maxMatchOff { diff --git a/vendor/github.com/klauspost/compress/zstd/enc_better.go b/vendor/github.com/klauspost/compress/zstd/enc_better.go index a4f5bf9..84a79fd 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_better.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_better.go @@ -179,9 +179,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -210,12 +210,12 @@ encodeLoop: // Index match start+1 (long) -> s - 1 index0 := s + repOff - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -241,9 +241,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -270,11 +270,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -708,9 +708,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. 
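An aside before the remaining encoder hunks: the `lenght` → `length` renames here and in enc_dfast.go below are purely cosmetic, but several other hunks in this diff (zstd/bitreader.go above, seqdec*.go and seqdec_amd64.s below) all follow from one structural change: bitReader no longer shrinks b.in on every refill and instead tracks an explicit cursor into an immutable slice. A condensed sketch of the refill pattern, with the struct trimmed to the relevant fields and encoding/binary standing in for le.Load32:

package main

import (
	"encoding/binary"
	"fmt"
)

type bitReader struct {
	in       []byte // immutable backing data
	value    uint64
	cursor   int // offset where the next (reverse) read ends
	bitsRead uint8
}

// fill tops up value from the tail of in. Walking cursor backwards
// replaces the old pattern of re-slicing in on every refill.
func (b *bitReader) fill() {
	if b.bitsRead < 32 {
		return
	}
	if b.cursor >= 4 {
		b.cursor -= 4
		b.value = (b.value << 32) | uint64(binary.LittleEndian.Uint32(b.in[b.cursor:]))
		b.bitsRead -= 32
		return
	}
	// Short tail: drain the remaining bytes one at a time.
	b.bitsRead -= uint8(8 * b.cursor)
	for b.cursor > 0 {
		b.cursor--
		b.value = (b.value << 8) | uint64(b.in[b.cursor])
	}
}

func main() {
	b := &bitReader{in: []byte{1, 2, 3, 4, 5, 6, 7, 8}, cursor: 8, bitsRead: 64}
	b.fill()
	b.fill()
	fmt.Printf("value=%#x cursor=%d bitsRead=%d\n", b.value, b.cursor, b.bitsRead)
	// value=0x807060504030201 cursor=0 bitsRead=0
}

Because the new field shifts the struct layout, the assembly in seqdec_amd64.s later in this diff reloads bitsRead from offset 40 instead of 32 and reads the cursor at offset 32 where it previously read the slice length at offset 8; the Go-side guards (`len(br.in) > ...` becoming `br.cursor > ...`) change for the same reason.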
@@ -738,12 +738,12 @@ encodeLoop: blk.sequences = append(blk.sequences, seq) // Index match start+1 (long) -> s - 1 - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -772,9 +772,9 @@ encodeLoop: if false && repIndex >= 0 && load6432(src, repIndex) == load6432(src, s+repOff) { // Consider history as well. var seq seq - lenght := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) + length := 8 + e.matchlen(s+8+repOff2, repIndex+8, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -801,11 +801,11 @@ encodeLoop: } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff2 + s += length + repOff2 nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go index a154c18..d36be7b 100644 --- a/vendor/github.com/klauspost/compress/zstd/enc_dfast.go +++ b/vendor/github.com/klauspost/compress/zstd/enc_dfast.go @@ -138,9 +138,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -166,11 +166,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop @@ -798,9 +798,9 @@ encodeLoop: if repIndex >= 0 && load3232(src, repIndex) == uint32(cv>>(repOff*8)) { // Consider history as well. var seq seq - lenght := 4 + e.matchlen(s+4+repOff, repIndex+4, src) + length := 4 + e.matchlen(s+4+repOff, repIndex+4, src) - seq.matchLen = uint32(lenght - zstdMinMatch) + seq.matchLen = uint32(length - zstdMinMatch) // We might be able to match backwards. // Extend as long as we can. @@ -826,11 +826,11 @@ encodeLoop: println("repeat sequence", seq, "next s:", s) } blk.sequences = append(blk.sequences, seq) - s += lenght + repOff + s += length + repOff nextEmit = s if s >= sLimit { if debugEncoder { - println("repeat ended", s, lenght) + println("repeat ended", s, length) } break encodeLoop diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index 72af7ef..8f8223c 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -6,6 +6,7 @@ package zstd import ( "crypto/rand" + "errors" "fmt" "io" "math" @@ -149,6 +150,9 @@ func (e *Encoder) ResetContentSize(w io.Writer, size int64) { // and write CRC if requested. 
func (e *Encoder) Write(p []byte) (n int, err error) { s := &e.state + if s.eofWritten { + return 0, ErrEncoderClosed + } for len(p) > 0 { if len(p)+len(s.filling) < e.o.blockSize { if e.o.crc { @@ -202,7 +206,7 @@ func (e *Encoder) nextBlock(final bool) error { return nil } if final && len(s.filling) > 0 { - s.current = e.EncodeAll(s.filling, s.current[:0]) + s.current = e.encodeAll(s.encoder, s.filling, s.current[:0]) var n2 int n2, s.err = s.w.Write(s.current) if s.err != nil { @@ -288,6 +292,9 @@ func (e *Encoder) nextBlock(final bool) error { s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) s.wg.Add(1) + if final { + s.eofWritten = true + } go func(src []byte) { if debugEncoder { println("Adding block,", len(src), "bytes, final:", final) @@ -303,9 +310,6 @@ func (e *Encoder) nextBlock(final bool) error { blk := enc.Block() enc.Encode(blk, src) blk.last = final - if final { - s.eofWritten = true - } // Wait for pending writes. s.wWg.Wait() if s.writeErr != nil { @@ -401,12 +405,20 @@ func (e *Encoder) Flush() error { if len(s.filling) > 0 { err := e.nextBlock(false) if err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } } s.wg.Wait() s.wWg.Wait() if s.err != nil { + // Ignore Flush after Close. + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return s.err } return s.writeErr @@ -422,6 +434,9 @@ func (e *Encoder) Close() error { } err := e.nextBlock(true) if err != nil { + if errors.Is(s.err, ErrEncoderClosed) { + return nil + } return err } if s.frameContentSize > 0 { @@ -459,6 +474,11 @@ func (e *Encoder) Close() error { } _, s.err = s.w.Write(frame) } + if s.err == nil { + s.err = ErrEncoderClosed + return nil + } + return s.err } @@ -469,6 +489,15 @@ func (e *Encoder) Close() error { // Data compressed with EncodeAll can be decoded with the Decoder, // using either a stream or DecodeAll. func (e *Encoder) EncodeAll(src, dst []byte) []byte { + e.init.Do(e.initialize) + enc := <-e.encoders + defer func() { + e.encoders <- enc + }() + return e.encodeAll(enc, src, dst) +} + +func (e *Encoder) encodeAll(enc encoder, src, dst []byte) []byte { if len(src) == 0 { if e.o.fullZero { // Add frame header. @@ -491,13 +520,7 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte { } return dst } - e.init.Do(e.initialize) - enc := <-e.encoders - defer func() { - // Release encoder reference to last block. - // If a non-single block is needed the encoder will reset again. - e.encoders <- enc - }() + // Use single segments when above minimum window and below window size. 
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize if e.o.single != nil { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 53e160f..e47af66 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -146,7 +146,9 @@ func (d *frameDec) reset(br byteBuffer) error { } return err } - printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + if debugDecoder { + printf("raw: %x, mantissa: %d, exponent: %d\n", wd, wd&7, wd>>3) + } windowLog := 10 + (wd >> 3) windowBase := uint64(1) << windowLog windowAdd := (windowBase / 8) * uint64(wd&0x7) diff --git a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go index 57b9c31..bea1779 100644 --- a/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/matchlen_generic.go @@ -7,20 +7,25 @@ package zstd import ( - "encoding/binary" "math/bits" + + "github.com/klauspost/compress/internal/le" ) // matchLen returns the maximum common prefix length of a and b. // a must be the shortest of the two. func matchLen(a, b []byte) (n int) { - for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { - diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) + left := len(a) + for left >= 8 { + diff := le.Load64(a, n) ^ le.Load64(b, n) if diff != 0 { return n + bits.TrailingZeros64(diff)>>3 } n += 8 + left -= 8 } + a = a[n:] + b = b[n:] for i := range a { if a[i] != b[i] { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index d7fe6d8..9a7de82 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error { return io.ErrUnexpectedEOF } var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go index 8adabd8..c59f17e 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go @@ -146,7 +146,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) { return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize) default: - return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode) + return true, fmt.Errorf("sequenceDecs_decode returned erroneous code %d", errCode) } s.seqSize += ctx.litRemain @@ -292,7 +292,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { return io.ErrUnexpectedEOF } - return fmt.Errorf("sequenceDecs_decode_amd64 returned erronous code %d", errCode) + return fmt.Errorf("sequenceDecs_decode_amd64 returned erroneous code %d", errCode) } if ctx.litRemain < 0 { diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s index 5b06174..a708ca6 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s @@ -7,9 +7,9 @@ TEXT ·sequenceDecs_decode_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX 
+ MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -335,9 +335,9 @@ error_overread: TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok: MOVQ R13, 160(AX) MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -634,9 +634,9 @@ error_overread: TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -920,9 +920,9 @@ error_overread: TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok: MOVQ R12, 160(CX) MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Return success MOVQ $0x00000000, ret+24(FP) @@ -1787,9 +1787,9 @@ empty_seqs: TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -1814,7 +1814,7 @@ TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -2281,8 +2281,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -2349,9 +2349,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -2376,7 +2376,7 @@ TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -2801,8 +2801,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX @@ -2869,9 +2869,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ br+8(FP), CX MOVQ 24(CX), DX - MOVBQZX 32(CX), BX + MOVBQZX 40(CX), BX MOVQ (CX), AX - MOVQ 8(CX), SI + MOVQ 32(CX), SI ADDQ SI, AX MOVQ AX, (SP) MOVQ ctx+16(FP), AX @@ -2896,7 
+2896,7 @@ TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 MOVQ 40(SP), AX ADDQ AX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R10, 32(SP) // outBase += outPosition @@ -3465,8 +3465,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), AX MOVQ DX, 24(AX) - MOVB BL, 32(AX) - MOVQ SI, 8(AX) + MOVB BL, 40(AX) + MOVQ SI, 32(AX) // Update the context MOVQ ctx+16(FP), AX @@ -3533,9 +3533,9 @@ error_not_enough_space: TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ br+8(FP), BX MOVQ 24(BX), AX - MOVBQZX 32(BX), DX + MOVBQZX 40(BX), DX MOVQ (BX), CX - MOVQ 8(BX), BX + MOVQ 32(BX), BX ADDQ BX, CX MOVQ CX, (SP) MOVQ ctx+16(FP), CX @@ -3560,7 +3560,7 @@ TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 MOVQ 40(SP), CX ADDQ CX, 48(SP) - // Calculate poiter to s.out[cap(s.out)] (a past-end pointer) + // Calculate pointer to s.out[cap(s.out)] (a past-end pointer) ADDQ R9, 32(SP) // outBase += outPosition @@ -4087,8 +4087,8 @@ handle_loop: loop_finished: MOVQ br+8(FP), CX MOVQ AX, 24(CX) - MOVB DL, 32(CX) - MOVQ BX, 8(CX) + MOVB DL, 40(CX) + MOVQ BX, 32(CX) // Update the context MOVQ ctx+16(FP), AX diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go index 2fb35b7..7cec219 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec_generic.go @@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error { } for i := range seqs { var ll, mo, ml int - if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { + if br.cursor > 4+((maxOffsetBits+16+16)>>3) { // inlined function: // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) diff --git a/vendor/github.com/klauspost/compress/zstd/seqenc.go b/vendor/github.com/klauspost/compress/zstd/seqenc.go index 8014174..65045ea 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqenc.go +++ b/vendor/github.com/klauspost/compress/zstd/seqenc.go @@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{ func llCode(litLength uint32) uint8 { const llDeltaCode = 19 if litLength <= 63 { - // Compiler insists on bounds check (Go 1.12) return llCodeTable[litLength&63] } return uint8(highBit(litLength)) + llDeltaCode @@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{ func mlCode(mlBase uint32) uint8 { const mlDeltaCode = 36 if mlBase <= 127 { - // Compiler insists on bounds check (Go 1.12) return mlCodeTable[mlBase&127] } return uint8(highBit(mlBase)) + mlDeltaCode diff --git a/vendor/github.com/klauspost/compress/zstd/snappy.go b/vendor/github.com/klauspost/compress/zstd/snappy.go index ec13594..a17381b 100644 --- a/vendor/github.com/klauspost/compress/zstd/snappy.go +++ b/vendor/github.com/klauspost/compress/zstd/snappy.go @@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue @@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) { } n, r.err = w.Write(r.block.output) if r.err != nil { - return written, err + return written, r.err } written += int64(n) continue diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index 4be7cc7..6252b46 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ 
b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -5,10 +5,11 @@ package zstd import ( "bytes" - "encoding/binary" "errors" "log" "math" + + "github.com/klauspost/compress/internal/le" ) // enable debug printing @@ -88,6 +89,10 @@ var ( // Close has been called. ErrDecoderClosed = errors.New("decoder used after Close") + // ErrEncoderClosed will be returned if the Encoder was used after + // Close has been called. + ErrEncoderClosed = errors.New("encoder used after Close") + // ErrDecoderNilInput is returned when a nil Reader was provided // and an operation other than Reset/DecodeAll/Close was attempted. ErrDecoderNilInput = errors.New("nil input provided as reader") @@ -106,11 +111,11 @@ func printf(format string, a ...interface{}) { } func load3232(b []byte, i int32) uint32 { - return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) + return le.Load32(b, i) } func load6432(b []byte, i int32) uint64 { - return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) + return le.Load64(b, i) } type byter interface { diff --git a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go index e964654..04aaca8 100644 --- a/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go +++ b/vendor/github.com/pierrec/lz4/v4/internal/lz4stream/block.go @@ -246,7 +246,7 @@ func (b *FrameDataBlock) Compress(f *Frame, src []byte, level lz4block.Compressi b.src = src // keep track of the source for content checksum if f.Descriptor.Flags.BlockChecksum() { - b.Checksum = xxh32.ChecksumZero(src) + b.Checksum = xxh32.ChecksumZero(b.Data) } return b } @@ -328,7 +328,7 @@ func (b *FrameDataBlock) Uncompress(f *Frame, dst, dict []byte, sum bool) ([]byt dst = dst[:n] } if f.Descriptor.Flags.BlockChecksum() { - if c := xxh32.ChecksumZero(dst); c != b.Checksum { + if c := xxh32.ChecksumZero(b.data); c != b.Checksum { err := fmt.Errorf("%w: got %x; expected %x", lz4errors.ErrInvalidBlockChecksum, c, b.Checksum) return nil, err } diff --git a/vendor/modules.txt b/vendor/modules.txt index adcf4a5..7598a72 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,5 +1,5 @@ -# github.com/IBM/sarama v1.43.2 -## explicit; go 1.19 +# github.com/IBM/sarama v1.45.0 +## explicit; go 1.21 github.com/IBM/sarama # github.com/apapsch/go-jsonmerge/v2 v2.0.0 ## explicit; go 1.12 @@ -29,7 +29,7 @@ github.com/cloudevents/sdk-go/v2/types # github.com/davecgh/go-spew v1.1.1 ## explicit github.com/davecgh/go-spew/spew -# github.com/eapache/go-resiliency v1.6.0 +# github.com/eapache/go-resiliency v1.7.0 ## explicit; go 1.13 github.com/eapache/go-resiliency/breaker # github.com/eapache/go-xerial-snappy v0.0.0-20230731223053-c322873962e3 @@ -48,8 +48,8 @@ github.com/globalsign/mgo/internal/scram # github.com/golang/snappy v0.0.4 ## explicit github.com/golang/snappy -# github.com/google/go-cmp v0.6.0 -## explicit; go 1.13 +# github.com/google/go-cmp v0.7.0 +## explicit; go 1.21 # github.com/google/uuid v1.6.0 ## explicit github.com/google/uuid @@ -114,14 +114,15 @@ github.com/json-iterator/go # github.com/kelseyhightower/envconfig v1.4.0 ## explicit github.com/kelseyhightower/envconfig -# github.com/klauspost/compress v1.17.9 -## explicit; go 1.20 +# github.com/klauspost/compress v1.18.0 +## explicit; go 1.22 github.com/klauspost/compress github.com/klauspost/compress/flate github.com/klauspost/compress/fse github.com/klauspost/compress/gzip github.com/klauspost/compress/huff0 github.com/klauspost/compress/internal/cpuinfo +github.com/klauspost/compress/internal/le 
github.com/klauspost/compress/internal/snapref github.com/klauspost/compress/zstd github.com/klauspost/compress/zstd/internal/xxhash @@ -137,7 +138,7 @@ github.com/modern-go/reflect2 ## explicit; go 1.20 github.com/oapi-codegen/runtime github.com/oapi-codegen/runtime/types -# github.com/pierrec/lz4/v4 v4.1.21 +# github.com/pierrec/lz4/v4 v4.1.22 ## explicit; go 1.14 github.com/pierrec/lz4/v4 github.com/pierrec/lz4/v4/internal/lz4block @@ -147,8 +148,8 @@ github.com/pierrec/lz4/v4/internal/xxh32 # github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 ## explicit github.com/rcrowley/go-metrics -# github.com/rogpeppe/go-internal v1.12.0 -## explicit; go 1.20 +# github.com/rogpeppe/go-internal v1.14.1 +## explicit; go 1.23 # github.com/xdg/scram v1.0.5 ## explicit github.com/xdg/scram @@ -169,16 +170,16 @@ go.uber.org/zap/internal/exit go.uber.org/zap/internal/pool go.uber.org/zap/internal/stacktrace go.uber.org/zap/zapcore -# golang.org/x/crypto v0.32.0 -## explicit; go 1.20 +# golang.org/x/crypto v0.35.0 +## explicit; go 1.23.0 golang.org/x/crypto/md4 golang.org/x/crypto/pbkdf2 -# golang.org/x/net v0.34.0 +# golang.org/x/net v0.35.0 ## explicit; go 1.18 golang.org/x/net/http2/hpack golang.org/x/net/internal/socks golang.org/x/net/proxy -# golang.org/x/text v0.21.0 +# golang.org/x/text v0.22.0 ## explicit; go 1.18 golang.org/x/text/transform golang.org/x/text/unicode/norm
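Beyond the mechanical vendor churn, the behavioral change most likely to be visible to callers in this repo is the new zstd.ErrEncoderClosed introduced above: Write on a closed Encoder now fails with that sentinel, while repeated Close (and Flush after Close) deliberately return nil. A short sketch of the updated contract, assuming only the vendored zstd package:

package main

import (
	"bytes"
	"errors"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var buf bytes.Buffer
	enc, err := zstd.NewWriter(&buf)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		panic(err)
	}
	// First Close flushes the frame; per the diff it records
	// ErrEncoderClosed internally but still returns nil.
	if err := enc.Close(); err != nil {
		panic(err)
	}
	// Flush and Close after Close are tolerated; only Write reports the error.
	fmt.Println(enc.Flush(), enc.Close()) // <nil> <nil>
	_, err = enc.Write([]byte("too late"))
	fmt.Println(errors.Is(err, zstd.ErrEncoderClosed)) // true
}

This mirrors the long-standing ErrDecoderClosed on the read side, so code that pools encoders can now detect use-after-Close instead of silently writing into a finished frame.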