diff --git a/.github/workflows/spark_test.yaml b/.github/workflows/spark_test.yaml
index baec6b8b41..6b2b1633cd 100644
--- a/.github/workflows/spark_test.yaml
+++ b/.github/workflows/spark_test.yaml
@@ -12,6 +12,7 @@ jobs:
         shard: [0, 1, 2]
     env:
       SCALA_VERSION: ${{ matrix.scala }}
+      SHARD_NUM: ${{ matrix.shard }}
       # Important: This must be the same as the length of shards in matrix
       NUM_SHARDS: 3
     steps:
@@ -89,6 +90,5 @@ jobs:
         if: always()
        uses: actions/upload-artifact@v4
         with:
-          name: test-results-DSL-${{ matrix.scala }}-shard-${{ matrix.shard }}-run-${{ github.run_id }}-attempt-${{ github.run_attempt }}
-          path: |
-            test_results_*.csv
+          name: test-results-DSL-${{ matrix.scala }}-run-${{ github.run_id }}-attempt-${{ github.run_attempt }}
+          path: test_results_*.csv
diff --git a/project/TestTimeListener.scala b/project/TestTimeListener.scala
index 3d31f84aa2..260e98b209 100644
--- a/project/TestTimeListener.scala
+++ b/project/TestTimeListener.scala
@@ -35,9 +35,12 @@ object TestTimeListener extends TestReportListener {
   /** testSuite -> Seq[(testName, duration, result)] */
   private val testResults = new ConcurrentHashMap[String, Seq[(String, Long, String)]]()
 
-  /** Generate a unique file name per JVM using process ID and timestamp */
-  private val jvmId = ManagementFactory.getRuntimeMXBean().getName.split("@")(0)
-  private val individualTestCsvPath = s"test_results_${jvmId}_${System.currentTimeMillis()}.csv"
+  /** Generate a unique file name per JVM using process ID and shard number */
+  private val individualTestCsvPath = {
+    val shardNumber = sys.env.getOrElse("SHARD_NUM", "0")
+    val jvmId = ManagementFactory.getRuntimeMXBean().getName.split("@")(0)
+    s"test_results_shard_${shardNumber}_jvm_${jvmId}.csv"
+  }
 
   /** Lock to ensure only one thread writes to a file at a time within this JVM */
   private val writeLock = new ReentrantLock()