diff --git a/pom.xml b/pom.xml index ceb5b986e..f3542d319 100644 --- a/pom.xml +++ b/pom.xml @@ -193,6 +193,10 @@ org.yaml snakeyaml + + com.h2database + h2 + @@ -348,7 +352,14 @@ com.github.tomakehurst wiremock-jre8 - 2.35.1 + 3.0.1 + test + + + + com.h2database + h2 + 2.2.224 test diff --git a/src/main/java/alpine/server/persistence/PersistenceInitializer.java b/src/main/java/alpine/server/persistence/PersistenceInitializer.java new file mode 100644 index 000000000..51cff6000 --- /dev/null +++ b/src/main/java/alpine/server/persistence/PersistenceInitializer.java @@ -0,0 +1,38 @@ +/* + * This file is part of Alpine. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Steve Springett. All Rights Reserved. 
+ */ +package alpine.server.persistence; + +import javax.servlet.ServletContextEvent; +import javax.servlet.ServletContextListener; + + +public class PersistenceInitializer implements ServletContextListener { + + // Alpine starts the H2 database engine when the database mode is set to 'server'. + // Since this application is built to run against a PostgreSQL database, the H2 + // dependency and server initialization have been removed in this shadowed class. + + @Override + public void contextInitialized(ServletContextEvent event) { + } + + @Override + public void contextDestroyed(ServletContextEvent event) { + } +} diff --git a/src/main/java/alpine/server/persistence/PersistenceManagerFactory.java b/src/main/java/alpine/server/persistence/PersistenceManagerFactory.java new file mode 100644 index 000000000..16e372ae2 --- /dev/null +++ b/src/main/java/alpine/server/persistence/PersistenceManagerFactory.java @@ -0,0 +1,372 @@ +/* + * This file is part of Alpine. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + * Copyright (c) Steve Springett. All Rights Reserved. 
+ */ +package alpine.server.persistence; + +import alpine.Config; +import alpine.common.logging.Logger; +import alpine.common.metrics.Metrics; +import alpine.model.InstalledUpgrades; +import alpine.model.SchemaVersion; +import alpine.persistence.IPersistenceManagerFactory; +import alpine.persistence.JdoProperties; +import com.zaxxer.hikari.HikariConfig; +import com.zaxxer.hikari.HikariDataSource; +import io.micrometer.core.instrument.FunctionCounter; +import io.micrometer.core.instrument.Gauge; +import org.datanucleus.PersistenceNucleusContext; +import org.datanucleus.PropertyNames; +import org.datanucleus.api.jdo.JDOPersistenceManagerFactory; +import org.datanucleus.store.schema.SchemaAwareStoreManager; + +import javax.jdo.JDOHelper; +import javax.jdo.PersistenceManager; +import javax.servlet.ServletContextEvent; +import javax.servlet.ServletContextListener; +import javax.sql.DataSource; +import java.util.HashSet; +import java.util.Properties; +import java.util.Set; +import java.util.function.Function; + +/** + * Initializes the JDO persistence manager on server startup. + * + * @author Steve Springett + * @since 1.0.0 + */ +public class PersistenceManagerFactory implements IPersistenceManagerFactory, ServletContextListener { + + private static final Logger LOGGER = Logger.getLogger(PersistenceManagerFactory.class); + private static final String DATANUCLEUS_METRICS_PREFIX = "datanucleus_"; + + private static JDOPersistenceManagerFactory pmf; + + @Override + public void contextInitialized(ServletContextEvent event) { + LOGGER.info("Initializing persistence framework"); + + final var dnProps = new Properties(); + + // Apply pass-through properties first. Settings that are hardcoded, or have dedicated + // AlpineKeys, must still take precedence over pass-through properties. 
+ dnProps.putAll(Config.getInstance().getPassThroughProperties("datanucleus")); + dnProps.put(PropertyNames.PROPERTY_PERSISTENCE_UNIT_NAME, "Alpine"); + dnProps.put(PropertyNames.PROPERTY_SCHEMA_AUTOCREATE_DATABASE, "true"); + dnProps.put(PropertyNames.PROPERTY_SCHEMA_AUTOCREATE_TABLES, "true"); + dnProps.put(PropertyNames.PROPERTY_SCHEMA_AUTOCREATE_COLUMNS, "true"); + dnProps.put(PropertyNames.PROPERTY_SCHEMA_AUTOCREATE_CONSTRAINTS, "true"); + dnProps.put(PropertyNames.PROPERTY_SCHEMA_GENERATE_DATABASE_MODE, "create"); + dnProps.put(PropertyNames.PROPERTY_QUERY_JDOQL_ALLOWALL, "true"); + if (Config.getInstance().getPropertyAsBoolean(Config.AlpineKey.METRICS_ENABLED)) { + dnProps.put(PropertyNames.PROPERTY_ENABLE_STATISTICS, "true"); + } + + if (Config.getInstance().getPropertyAsBoolean(Config.AlpineKey.DATABASE_POOL_ENABLED)) { + // DataNucleus per default creates two connection factories. + // - Primary: Used for operations in transactional context + // - Secondary: Used for operations in non-transactional context, schema generation and value generation + // + // When using pooling, DN will thus create two connection pools of equal size. + // However, the optimal sizing of these pools depends on how the application makes use of transactions. + // When only performing operations within transactions, connections in the secondary pool would remain + // mostly idle. 
+ // + // See also: + // - https://www.datanucleus.org/products/accessplatform_6_0/jdo/persistence.html#datastore_connection + // - https://datanucleus.groups.io/g/main/topic/95191894#490 + + LOGGER.info("Creating transactional connection pool"); + dnProps.put(PropertyNames.PROPERTY_CONNECTION_FACTORY, createTxPooledDataSource()); + + LOGGER.info("Creating non-transactional connection pool"); + dnProps.put(PropertyNames.PROPERTY_CONNECTION_FACTORY2, createNonTxPooledDataSource()); + } else { + // No connection pooling; Let DataNucleus handle the datasource setup + dnProps.put(PropertyNames.PROPERTY_CONNECTION_URL, Config.getInstance().getProperty(Config.AlpineKey.DATABASE_URL)); + dnProps.put(PropertyNames.PROPERTY_CONNECTION_DRIVER_NAME, Config.getInstance().getProperty(Config.AlpineKey.DATABASE_DRIVER)); + dnProps.put(PropertyNames.PROPERTY_CONNECTION_USER_NAME, Config.getInstance().getProperty(Config.AlpineKey.DATABASE_USERNAME)); + dnProps.put(PropertyNames.PROPERTY_CONNECTION_PASSWORD, Config.getInstance().getPropertyOrFile(Config.AlpineKey.DATABASE_PASSWORD)); + } + + pmf = (JDOPersistenceManagerFactory) JDOHelper.getPersistenceManagerFactory(dnProps, "Alpine"); + + if (Config.getInstance().getPropertyAsBoolean(Config.AlpineKey.METRICS_ENABLED)) { + LOGGER.info("Registering DataNucleus metrics"); + registerDataNucleusMetrics(pmf); + } + + // Ensure that the UpgradeMetaProcessor and SchemaVersion tables are created NOW, not dynamically at runtime. 
+ final PersistenceNucleusContext ctx = pmf.getNucleusContext(); + final Set classNames = new HashSet<>(); + classNames.add(InstalledUpgrades.class.getCanonicalName()); + classNames.add(SchemaVersion.class.getCanonicalName()); + ((SchemaAwareStoreManager)ctx.getStoreManager()).createSchemaForClasses(classNames, new Properties()); + } + + @Override + public void contextDestroyed(ServletContextEvent event) { + LOGGER.info("Shutting down persistence framework"); + tearDown(); + } + + /** + * Creates a new JDO PersistenceManager. + * @return a PersistenceManager + */ + public static PersistenceManager createPersistenceManager() { + if (pmf == null && Config.isUnitTestsEnabled()) { + pmf = (JDOPersistenceManagerFactory)JDOHelper.getPersistenceManagerFactory(JdoProperties.unit(), "Alpine"); + } + if (pmf == null) { + throw new IllegalStateException("Context is not initialized yet."); + } + return pmf.getPersistenceManager(); + } + + public PersistenceManager getPersistenceManager() { + return createPersistenceManager(); + } + + + /** + * Set the {@link JDOPersistenceManagerFactory} to be used by {@link PersistenceManagerFactory}. + *

+ * This is mainly useful for integration tests that run outside a servlet context, + * yet require a persistence context setup with an external database. + * + * @param pmf The {@link JDOPersistenceManagerFactory} to set + * @throws IllegalStateException When the {@link JDOPersistenceManagerFactory} was already initialized + * @since 2.1.0 + */ + @SuppressWarnings("unused") + public static void setJdoPersistenceManagerFactory(final JDOPersistenceManagerFactory pmf) { + if (PersistenceManagerFactory.pmf != null) { + throw new IllegalStateException("The PersistenceManagerFactory can only be set when it hasn't been initialized yet."); + } + + PersistenceManagerFactory.pmf = pmf; + } + + /** + * Closes the {@link JDOPersistenceManagerFactory} and removes any reference to it. + *

+ * This method should be called in the {@code tearDown} method of unit- and integration + * tests that interact with the persistence layer. + * + * @since 2.1.0 + */ + public static void tearDown() { + if (pmf != null) { + pmf.close(); + pmf = null; + } + } + + private void registerDataNucleusMetrics(final JDOPersistenceManagerFactory pmf) { + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "datastore_reads_total", pmf, + p -> p.getNucleusContext().getStatistics().getNumberOfDatastoreReads()) + .description("Total number of read operations from the datastore") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "datastore_writes_total", pmf, + p -> p.getNucleusContext().getStatistics().getNumberOfDatastoreWrites()) + .description("Total number of write operations to the datastore") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_fetches_total", pmf, + p -> p.getNucleusContext().getStatistics().getNumberOfObjectFetches()) + .description("Total number of objects fetched from the datastore") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_inserts_total", pmf, + p -> p.getNucleusContext().getStatistics().getNumberOfObjectInserts()) + .description("Total number of objects inserted into the datastore") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_updates_total", pmf, + p -> p.getNucleusContext().getStatistics().getNumberOfObjectUpdates()) + .description("Total number of objects updated in the datastore") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "object_deletes_total", pmf, + p -> p.getNucleusContext().getStatistics().getNumberOfObjectDeletes()) + .description("Total number of objects deleted from the datastore") + .register(Metrics.getRegistry()); + + Gauge.builder(DATANUCLEUS_METRICS_PREFIX + 
"query_execution_time_ms_avg", pmf, + p -> p.getNucleusContext().getStatistics().getQueryExecutionTimeAverage()) + .description("Average query execution time in milliseconds") + .register(Metrics.getRegistry()); + + Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "queries_active", pmf, + p -> p.getNucleusContext().getStatistics().getQueryActiveTotalCount()) + .description("Number of currently active queries") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "queries_executed_total", pmf, + p -> p.getNucleusContext().getStatistics().getQueryExecutionTotalCount()) + .description("Total number of executed queries") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "queries_failed_total", pmf, + p -> p.getNucleusContext().getStatistics().getQueryErrorTotalCount()) + .description("Total number of queries that completed with an error") + .register(Metrics.getRegistry()); + + Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "transaction_execution_time_ms_avg", pmf, + p -> p.getNucleusContext().getStatistics().getTransactionExecutionTimeAverage()) + .description("Average transaction execution time in milliseconds") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_active", pmf, + p -> p.getNucleusContext().getStatistics().getTransactionActiveTotalCount()) + .description("Number of currently active transactions") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_total", pmf, + p -> p.getNucleusContext().getStatistics().getTransactionTotalCount()) + .description("Total number of transactions") + .register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_committed_total", pmf, + p -> p.getNucleusContext().getStatistics().getTransactionCommittedTotalCount()) + .description("Total number of committed transactions") + 
.register(Metrics.getRegistry()); + + FunctionCounter.builder(DATANUCLEUS_METRICS_PREFIX + "transactions_rolledback_total", pmf, + p -> p.getNucleusContext().getStatistics().getTransactionRolledBackTotalCount()) + .description("Total number of rolled-back transactions") + .register(Metrics.getRegistry()); + + // This number does not necessarily equate the number of physical connections. + // It resembles the number of active connections MANAGED BY DATANUCLEUS. + // The number of connections reported by connection pool metrics will differ. + Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "connections_active", pmf, + p -> p.getNucleusContext().getStatistics().getConnectionActiveCurrent()) + .description("Number of currently active managed datastore connections") + .register(Metrics.getRegistry()); + + Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_second_level_entries", pmf, + p -> p.getNucleusContext().getLevel2Cache().getSize()) + .description("Number of entries in the second level cache") + .register(Metrics.getRegistry()); + + Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_query_generic_compilation_entries", pmf, + p -> p.getQueryGenericCompilationCache().size()) + .description("Number of entries in the generic query compilation cache") + .register(Metrics.getRegistry()); + + Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_query_datastore_compilation_entries", pmf, + p -> p.getQueryDatastoreCompilationCache().size()) + .description("Number of entries in the datastore query compilation cache") + .register(Metrics.getRegistry()); + + // Note: The query results cache is disabled per default. 
+ Gauge.builder(DATANUCLEUS_METRICS_PREFIX + "cache_query_result_entries", pmf, + p -> p.getQueryCache().getQueryCache().size()) + .description("Number of entries in the query result cache") + .register(Metrics.getRegistry()); + } + + private DataSource createTxPooledDataSource() { + final var hikariConfig = createBaseHikariConfig("transactional"); + hikariConfig.setMaximumPoolSize(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_TX_MAX_SIZE, + Config.AlpineKey.DATABASE_POOL_MAX_SIZE, + Config.getInstance()::getPropertyAsInt + )); + hikariConfig.setMinimumIdle(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_TX_MIN_IDLE, + Config.AlpineKey.DATABASE_POOL_MIN_IDLE, + Config.getInstance()::getPropertyAsInt + )); + hikariConfig.setMaxLifetime(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_TX_MAX_LIFETIME, + Config.AlpineKey.DATABASE_POOL_MAX_LIFETIME, + Config.getInstance()::getPropertyAsInt + )); + hikariConfig.setIdleTimeout(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_TX_IDLE_TIMEOUT, + Config.AlpineKey.DATABASE_POOL_IDLE_TIMEOUT, + Config.getInstance()::getPropertyAsInt + )); + hikariConfig.setKeepaliveTime(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_TX_KEEPALIVE_INTERVAL, + Config.AlpineKey.DATABASE_POOL_KEEPALIVE_INTERVAL, + Config.getInstance()::getPropertyAsInt + )); + return new HikariDataSource(hikariConfig); + } + + private DataSource createNonTxPooledDataSource() { + final var hikariConfig = createBaseHikariConfig("non-transactional"); + hikariConfig.setMaximumPoolSize(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_NONTX_MAX_SIZE, + Config.AlpineKey.DATABASE_POOL_MAX_SIZE, + Config.getInstance()::getPropertyAsInt + )); + hikariConfig.setMinimumIdle(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_NONTX_MIN_IDLE, + Config.AlpineKey.DATABASE_POOL_MIN_IDLE, + Config.getInstance()::getPropertyAsInt + )); + 
 hikariConfig.setMaxLifetime(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_NONTX_MAX_LIFETIME, + Config.AlpineKey.DATABASE_POOL_MAX_LIFETIME, + Config.getInstance()::getPropertyAsInt + )); + hikariConfig.setIdleTimeout(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_NONTX_IDLE_TIMEOUT, + Config.AlpineKey.DATABASE_POOL_IDLE_TIMEOUT, + Config.getInstance()::getPropertyAsInt + )); + hikariConfig.setKeepaliveTime(getConfigPropertyWithFallback( + Config.AlpineKey.DATABASE_POOL_NONTX_KEEPALIVE_INTERVAL, + Config.AlpineKey.DATABASE_POOL_KEEPALIVE_INTERVAL, + Config.getInstance()::getPropertyAsInt + )); + return new HikariDataSource(hikariConfig); + } + + private HikariConfig createBaseHikariConfig(final String poolName) { + final var hikariConfig = new HikariConfig(); + hikariConfig.setPoolName(poolName); + hikariConfig.setJdbcUrl(Config.getInstance().getProperty(Config.AlpineKey.DATABASE_URL)); + hikariConfig.setDriverClassName(Config.getInstance().getProperty(Config.AlpineKey.DATABASE_DRIVER)); + hikariConfig.setUsername(Config.getInstance().getProperty(Config.AlpineKey.DATABASE_USERNAME)); + hikariConfig.setPassword(Config.getInstance().getPropertyOrFile(Config.AlpineKey.DATABASE_PASSWORD)); + + if (Config.getInstance().getPropertyAsBoolean(Config.AlpineKey.METRICS_ENABLED)) { + hikariConfig.setMetricRegistry(Metrics.getRegistry()); + } + + return hikariConfig; + } + + private T getConfigPropertyWithFallback(final Config.Key key, final Config.Key fallbackKey, + final Function method) { + if (Config.getInstance().getProperty(key) != null) { + return method.apply(key); + } + + return method.apply(fallbackKey); + } + +} + diff --git a/src/main/java/org/dependencytrack/persistence/H2WebConsoleInitializer.java b/src/main/java/org/dependencytrack/persistence/H2WebConsoleInitializer.java deleted file mode 100644 index d526926c3..000000000 --- a/src/main/java/org/dependencytrack/persistence/H2WebConsoleInitializer.java +++ /dev/null @@ -1,59 +0,0 @@ 
-package org.dependencytrack.persistence; - -import alpine.Config; -import alpine.common.logging.Logger; -import org.h2.Driver; -import org.h2.server.web.WebServlet; - -import javax.servlet.ServletContext; -import javax.servlet.ServletContextEvent; -import javax.servlet.ServletContextListener; -import javax.servlet.ServletRegistration; -import java.util.Map; - -public class H2WebConsoleInitializer implements ServletContextListener { - private static final Logger LOGGER = Logger.getLogger(H2WebConsoleInitializer.class); - - private static final String H2_CONSOLE_ENABLED_INIT_PARAM = "h2.console.enabled"; - private static final String H2_CONSOLE_PATH_INIT_PARAM = "h2.console.path"; - - /** - * {@inheritDoc} - */ - @Override - public void contextInitialized(final ServletContextEvent event) { - Config configuration = Config.getInstance(); - String databaseMode = configuration.getProperty(Config.AlpineKey.DATABASE_MODE); - String databaseDriver = configuration.getProperty(Config.AlpineKey.DATABASE_DRIVER); - Boolean h2ConsoleEnabled = Boolean.valueOf(event.getServletContext().getInitParameter(H2_CONSOLE_ENABLED_INIT_PARAM)); - // Misconfiguration check, if external database is used, no need to pointlessly expose the H2 console - if ("external".equals(databaseMode) || !Driver.class.getName().equals(databaseDriver) || !h2ConsoleEnabled) { - LOGGER.debug("H2 web console will not be initialized since either database mode is external or database driver is not H2 or the console is simply disabled !"); - LOGGER.debug("Database mode : " + databaseMode); - LOGGER.debug("Database driver : " + databaseDriver); - LOGGER.debug("H2 web console enabled : " + h2ConsoleEnabled); - return; - } - String h2ConsolePath = event.getServletContext().getInitParameter(H2_CONSOLE_PATH_INIT_PARAM); - LOGGER.warn("Building and exposing H2 web servlet to " + h2ConsolePath); - LOGGER.warn("It should only be enabled for development purposes to avoid security risks related to production data leak."); - 
ServletContext servletContext = event.getServletContext(); - WebServlet h2WebServlet = new WebServlet(); - ServletRegistration.Dynamic registration = servletContext.addServlet("h2Console", h2WebServlet); - registration.addMapping(h2ConsolePath + "/*"); - registration.setLoadOnStartup(1); - // Production filter alteration : we rely here on the fact the Jetty server does not entirely respect Servlet 3.0 specs. See https://github.com/DependencyTrack/dependency-track/pull/2561 - // H2 Console uses local iframe - servletContext.getFilterRegistration("CspFilter").getInitParameters().put("frame-ancestors", "'self'"); - // Allow H2 web console path - Map whitelistUrlParams = servletContext.getFilterRegistration("WhitelistUrlFilter").getInitParameters(); - String allowUrls = whitelistUrlParams.get("allowUrls"); - if (allowUrls != null && !allowUrls.contains(h2ConsolePath)) { - whitelistUrlParams.put("allowUrls", allowUrls + "," + h2ConsolePath); - } - String forwardExcludes = whitelistUrlParams.get("forwardExcludes"); - if (forwardExcludes != null && !forwardExcludes.contains(h2ConsolePath)) { - whitelistUrlParams.put("forwardExcludes", forwardExcludes + "," + h2ConsolePath); - } - } -} diff --git a/src/main/java/org/dependencytrack/persistence/VulnerableSoftwareQueryManager.java b/src/main/java/org/dependencytrack/persistence/VulnerableSoftwareQueryManager.java index 6fad48a4f..a542c0078 100644 --- a/src/main/java/org/dependencytrack/persistence/VulnerableSoftwareQueryManager.java +++ b/src/main/java/org/dependencytrack/persistence/VulnerableSoftwareQueryManager.java @@ -21,11 +21,11 @@ import alpine.persistence.PaginatedResult; import alpine.resources.AlpineRequest; import com.github.packageurl.PackageURL; +import org.apache.commons.lang3.StringUtils; import org.dependencytrack.model.Cpe; import org.dependencytrack.model.Cwe; import org.dependencytrack.model.Vulnerability; import org.dependencytrack.model.VulnerableSoftware; -import org.h2.util.StringUtils; import 
javax.jdo.PersistenceManager; import javax.jdo.Query; @@ -304,7 +304,7 @@ public PaginatedResult getCwes() { query.setOrdering("id asc"); } if (filter != null) { - if (StringUtils.isNumber(filter)) { + if (StringUtils.isNumeric(filter)) { query.setFilter("cweId == :cweId || name.matches(:filter)"); final String filterString = ".*" + filter.toLowerCase() + ".*"; return execute(query, Integer.valueOf(filter), filterString); diff --git a/src/main/webapp/WEB-INF/web.xml b/src/main/webapp/WEB-INF/web.xml index 9431d0f1c..ff32ad71f 100644 --- a/src/main/webapp/WEB-INF/web.xml +++ b/src/main/webapp/WEB-INF/web.xml @@ -56,9 +56,6 @@ org.dependencytrack.event.kafka.KafkaStreamsInitializer - - org.dependencytrack.persistence.H2WebConsoleInitializer - org.dependencytrack.event.IntegrityMetaInitializer