Merge pull request #1 from VultureProject/devel
Bug fix in bucket_brigades treatment
HugoSoszynski authored Apr 4, 2018
2 parents baca677 + 39b531d commit 9efbe6e
Showing 4 changed files with 79 additions and 29 deletions.
2 changes: 1 addition & 1 deletion .travis.yml
@@ -74,4 +74,4 @@ script:
 after_script:
 - sudo cat /var/log/apache2/error.log
 - sudo cat /var/log/apache2/defender_match.log
-- sudo cat /var/log/apache2/defender_json_match.log
+- sudo head /var/log/apache2/defender_json_match.log
18 changes: 13 additions & 5 deletions RuntimeScanner.cpp
@@ -694,7 +694,7 @@ void RuntimeScanner::addHeader(char *key, char *val) {
     else if (k == "transfer-encoding") {
         transferEncodingProvided = true;
         if (v == "chunked") {
-            transferEncoding = TRANSFER_ENCOING_CHUNKED;
+            transferEncoding = TRANSFER_ENCODING_CHUNKED;
         }
     }
     headers.push_back(make_pair(k, v));
@@ -888,6 +888,7 @@ void RuntimeScanner::writeJSONLearningLog() {
         return;
 
     stringstream jsonlog;
+    stringstream unique_data;
     std::time_t result = std::time(nullptr);
     std::asctime(std::localtime(&result));
     jsonlog << "{\"time\":";
@@ -910,28 +911,35 @@ void RuntimeScanner::writeJSONLearningLog() {
     for (const auto &matchInfoPair : matchInfos) {
         const match_info_t &matchInfo = matchInfoPair.second;
         jsonlog << "{\"zone\":\"" << matchInfo.zone << "\",";
+        unique_data << "" << matchInfo.zone;
 
         jsonlog << "\"id\":[";
-        for (const unsigned long &ruleId : matchInfo.ruleId)
+        for (const unsigned long &ruleId : matchInfo.ruleId) {
             jsonlog << ruleId << ",";
+            unique_data << ruleId;
+        }
         jsonlog.seekp(-1, std::ios_base::end);
         jsonlog << "]";
 
-        if (!matchInfo.varname.empty())
+        if (!matchInfo.varname.empty()) {
             jsonlog << ",\"var_name\":\"" << escapeQuotes(matchInfo.varname) << "\"";
+            unique_data << "" << escapeQuotes(matchInfo.varname);
+        }
         if (extensiveLearning && !matchInfo.content.empty())
             jsonlog << ",\"content\":\"" << escapeQuotes(matchInfo.content) << "\"";
         jsonlog << "},";
     }
 
     if (matchInfos.size() > 0) jsonlog.seekp(-1, std::ios_base::end);
     jsonlog << "],";
 
     jsonlog << "\"client\":\"" << clientIp << "\",";
     jsonlog << "\"server\":\"" << serverHostname << "\",";
     jsonlog << "\"method\":\"" << methods[method] << "\",";
     jsonlog << "\"protocol\":\"" << protocol << "\",";
-    jsonlog << "\"unparsed_uri\":\"" << fullUri << "\"";
+    jsonlog << "\"unparsed_uri\":\"" << fullUri << "\",";
+    unique_data << "" << uri;
+    jsonlog << "\"context_id\":\"" << unique_data.str() << "\"";
 
     jsonlog << "}" << endl;
     streamToFile(jsonlog, learningJSONLogFile);
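Note on the learning log: the unique_data stream added above concatenates the match zone, every matched rule id, the escaped variable name, and the request URI; the result is written as the new "context_id" field, so identical matches on the same URI share one key. A minimal standalone C++ sketch of that keying logic (all example values are hypothetical, not part of the commit):

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

int main() {
    // Same concatenation order as writeJSONLearningLog():
    // zone, then each matched rule id, then var name, then uri.
    std::stringstream unique_data;
    std::string zone = "BODY";                          // hypothetical zone
    std::vector<unsigned long> ruleIds = {1000, 1011};  // hypothetical rule ids
    std::string varname = "username";                   // hypothetical var name
    std::string uri = "/login";                         // hypothetical request uri

    unique_data << zone;
    for (const unsigned long &ruleId : ruleIds)
        unique_data << ruleId;
    unique_data << varname;
    unique_data << uri;

    // Prints BODY10001011username/login: two identical matches on the
    // same URI therefore produce the same context_id.
    std::cout << unique_data.str() << std::endl;
    return 0;
}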
4 changes: 3 additions & 1 deletion RuntimeScanner.hpp
@@ -40,6 +40,8 @@
 
 #define PASS -1
 #define STOP 403
+/* used for reading input blocks */
+#define READ_BLOCKSIZE 2048
 
 using namespace Util;
 using std::pair;
@@ -78,7 +80,7 @@ enum CONTENT_TYPE {
 
 enum TRANSFER_ENCODING {
     TRANSFER_ENCODING_UNSUPPORTED = 0,
-    TRANSFER_ENCOING_CHUNKED
+    TRANSFER_ENCODING_CHUNKED
 };
 
 enum LOG_LVL {
84 changes: 62 additions & 22 deletions mod_defender.cpp
@@ -21,6 +21,8 @@
 APLOG_USE_MODULE(defender);
 #endif
 
+#define MAX_BB_SIZE 0x7FFFFFFF
+
 /*
  * Per-directory configuration structure
  */
@@ -276,80 +278,103 @@ static int fixups(request_rec *r) {
     if (scanner->transferEncodingProvided /*&& scanner->transferEncoding == TRANSFER_ENCODING_UNSUPPORTED*/)
         return HTTP_NOT_IMPLEMENTED;
 
+    if( scanner->contentLength >= MAX_BB_SIZE ) {
+        ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "Content-Length '%lu' is greater than process limit : %d",
+                      scanner->contentLength, MAX_BB_SIZE);
+        return DECLINED;
+    }
+
     // Retrieve the body
     // Pre-allocate necessary bytes
     scanner->body.reserve(scanner->contentLength);
 
     // Wait for the body to be fully received
     apr_bucket_brigade *bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
     unsigned int prev_nr_buckets = 0;
+    int seen_eos = 0;
+    apr_status_t status;
+    apr_bucket *bucket = NULL;
+    apr_size_t ttbytes = 0;
 
+    if (bb == NULL)
+        goto read_error_out;
     do {
-        int rc = ap_get_brigade(r->input_filters, bb, AP_MODE_SPECULATIVE, APR_BLOCK_READ, LONG_MAX);
-        if (rc != APR_SUCCESS) {
-            ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Error reading body: %s", get_apr_error(r->pool, rc));
+        if( (status = ap_get_brigade(r->input_filters, bb, AP_MODE_SPECULATIVE, APR_BLOCK_READ, MAX_BB_SIZE) )
+                != APR_SUCCESS) {
+            ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "Error reading body: %s", get_apr_error(r->pool, status));
            goto read_error_out;
         }
 
         unsigned int nr_buckets = 0;
         // Iterate over buckets
-        for (apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
+        for (bucket = APR_BRIGADE_FIRST(bb);
              bucket != APR_BRIGADE_SENTINEL(bb); bucket = APR_BUCKET_NEXT(bucket)) {
             // Stop if we reach the EOS bucket
-            if (APR_BUCKET_IS_EOS(bucket))
+            if (APR_BUCKET_IS_EOS(bucket)) {
+                ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "EOS bucket reached.");
+                seen_eos = 1;
                 break;
+            }
 
             // Ignore non data buckets
-            if (APR_BUCKET_IS_METADATA(bucket) || APR_BUCKET_IS_FLUSH(bucket))
+            if (APR_BUCKET_IS_METADATA(bucket) || APR_BUCKET_IS_FLUSH(bucket)) {
+                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Bucket is METADATA or FLUSH.");
                 continue;
+            }
 
             nr_buckets++;
             // Skip already copied buckets
-            if (nr_buckets <= prev_nr_buckets)
+            if (nr_buckets <= prev_nr_buckets) {
+                ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Skip already copied bucket: %u / %u.", nr_buckets,
+                              prev_nr_buckets);
                 continue;
+            }
 
             const char *buf;
-            apr_size_t nbytes;
-            int rv = apr_bucket_read(bucket, &buf, &nbytes, APR_BLOCK_READ);
-            if (rv != APR_SUCCESS) {
-                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Failed reading input / bucket: %s",
-                              get_apr_error(r->pool, rv));
+            apr_size_t nbytes = MAX_BB_SIZE;
+
+            if ((status = apr_bucket_read(bucket, &buf, &nbytes, APR_BLOCK_READ)) != APR_SUCCESS) {
+                ap_log_rerror(APLOG_MARK, APLOG_ERR, 0, r, "Failed reading input / bucket: %s",
+                              get_apr_error(r->pool, status));
                 goto read_error_out;
             }
 
-            // cerr << "bucket #" << nr_buckets - 1 << " received, nbytes: " << nbytes << ", total len: "
-            //      << scanner->body.length() + nbytes << endl;
+            ttbytes += nbytes;
+            ap_log_rerror(APLOG_MARK, APLOG_TRACE1, 0, r, "Bucket successfully read: %lu bytes. Total length=%lu.",
+                          nbytes, ttbytes);
 
             // More bytes in the BODY than specified in the content-length
             if (scanner->body.length() + nbytes > scanner->contentLength) {
-                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Too much POST data: received body of %lu bytes but "
+                ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "Too much POST data: received body of %lu bytes but "
                               "got content-length: %lu", scanner->body.length() + nbytes, scanner->contentLength);
                 goto read_error_out;
             }
 
             // More bytes in the BODY than specified by the allowed body limit
             if (scanner->body.length() + nbytes > dcfg->requestBodyLimit) {
                 scanner->bodyLimitExceeded = true;
-                ap_log_rerror(APLOG_MARK, APLOG_NOTICE, 0, r, "Body limit exceeded (%lu)", dcfg->requestBodyLimit);
+                ap_log_rerror(APLOG_MARK, APLOG_WARNING, 0, r, "Body limit exceeded (%lu)", dcfg->requestBodyLimit);
                 break;
             }
 
             scanner->body.append(buf, nbytes);
         }
         prev_nr_buckets = nr_buckets;
 
-        if (scanner->body.length() == scanner->contentLength)
+        if (scanner->body.length() == scanner->contentLength) {
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "Content-Length %lu reached.", scanner->contentLength);
             break;
+        }
 
         apr_brigade_cleanup(bb);
         apr_sleep(1000);
-    } while (true);
+    } while( !scanner->bodyLimitExceeded && !seen_eos );
 
-    // cerr << "[pid " << getpid() << "] read " << scanner->body.length() << " bytes, ";
-    // cerr << "content-length: " << scanner->contentLength << endl;
-    // cerr << "body: " << scanner->body << endl;
+    ap_log_rerror(APLOG_MARK, APLOG_INFO, 0, r, "Brigades processing completed. Process body.");
 
     // Run scanner
     ret = scanner->processBody();

@@ -361,8 +386,23 @@ static int fixups(request_rec *r) {
     return ret;
 
 read_error_out:
-    if (dcfg->useenv) return DECLINED;
-    return HTTP_INTERNAL_SERVER_ERROR;
+    switch(status) {
+        case APR_EOF : /* EOF when reading request body. */
+            r->connection->keepalive = AP_CONN_CLOSE;
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "HTTP_BAD_REQUEST");
+            return HTTP_BAD_REQUEST;
+        case APR_TIMEUP : /* Timeout. */
+            r->connection->keepalive = AP_CONN_CLOSE;
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "HTTP_REQUEST_TIME_OUT.");
+            return HTTP_REQUEST_TIME_OUT;
+        case AP_FILTER_ERROR :
+        case APR_EGENERAL :
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "DECLINED.");
+            return DECLINED;
+        default :
+            ap_log_rerror(APLOG_MARK, APLOG_DEBUG, 0, r, "HTTP_INTERNAL_SERVER_ERROR.");
+            return HTTP_INTERNAL_SERVER_ERROR;
+    }
 }
 
 /* Apache callback to register our hooks.
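Note on the fix: ap_get_brigade() is called with AP_MODE_SPECULATIVE, which returns body data without consuming it from the input filters, so each pass of the do/while sees the brigade from the start again; that is why the commit keeps the prev_nr_buckets counter to skip buckets already copied, and why the loop now ends on EOS or on the body limit instead of looping on while (true). A condensed C++ sketch of the pattern under those assumptions (read_body_speculative is a hypothetical helper, not the committed code):

#include <httpd.h>
#include <apr_buckets.h>
#include <apr_errno.h>
#include <util_filter.h>

/* Hypothetical, simplified reduction of the loop above: peek at the body,
 * skip buckets seen on an earlier speculative pass, stop at EOS or limit.
 * The committed code additionally stops once Content-Length is reached. */
static apr_status_t read_body_speculative(request_rec *r, apr_size_t limit) {
    apr_bucket_brigade *bb = apr_brigade_create(r->pool, r->connection->bucket_alloc);
    unsigned int prev_nr_buckets = 0;
    apr_status_t status;
    apr_size_t total = 0;
    int seen_eos = 0;

    if (bb == NULL)
        return APR_EGENERAL;

    do {
        /* Speculative read: the data stays available to later handlers. */
        status = ap_get_brigade(r->input_filters, bb, AP_MODE_SPECULATIVE,
                                APR_BLOCK_READ, limit);
        if (status != APR_SUCCESS)
            return status;

        unsigned int nr_buckets = 0;
        for (apr_bucket *bucket = APR_BRIGADE_FIRST(bb);
             bucket != APR_BRIGADE_SENTINEL(bb);
             bucket = APR_BUCKET_NEXT(bucket)) {
            if (APR_BUCKET_IS_EOS(bucket)) {        /* end of stream */
                seen_eos = 1;
                break;
            }
            if (APR_BUCKET_IS_METADATA(bucket) || APR_BUCKET_IS_FLUSH(bucket))
                continue;                           /* non-data bucket */

            nr_buckets++;
            if (nr_buckets <= prev_nr_buckets)
                continue;   /* already read on an earlier speculative pass */

            const char *buf;
            apr_size_t nbytes;
            status = apr_bucket_read(bucket, &buf, &nbytes, APR_BLOCK_READ);
            if (status != APR_SUCCESS)
                return status;

            total += nbytes;        /* the module appends buf to its copy here */
            if (total > limit)
                return APR_ENOSPC;  /* body larger than the allowed limit */
        }
        prev_nr_buckets = nr_buckets;
        apr_brigade_cleanup(bb);
    } while (!seen_eos);

    return APR_SUCCESS;
}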
