Skip to content

Commit

Permalink
Do not exit running jobs on Spark: it is possible the container may exit before the collector collects metrics (#5813)
Browse files Browse the repository at this point in the history
  • Loading branch information
atoulme authored Jan 18, 2025
1 parent 22d8607 commit b4d1a4c
Showing 1 changed file with 10 additions and 2 deletions.
12 changes: 10 additions & 2 deletions docker/apachespark/long_running.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,13 +26,13 @@
#

import time
import signal
import threading
import queue as Queue
from typing import Any, Callable, List, Tuple

from pyspark import SparkConf, SparkContext


def delayed(seconds: int) -> Callable[[Any], Any]:
def f(x: int) -> int:
time.sleep(seconds)
Expand Down Expand Up @@ -76,7 +76,15 @@ def run() -> List[Tuple[int, int]]:
print("Job results are:", result.get())
sc.stop()

def signal_handler(signum, frame):
    """SIGINT handler: set the module-level ``interrupted`` flag.

    The first parameter is named ``signum`` (not ``signal``) so it does not
    shadow the imported ``signal`` module. Signal handlers are invoked
    positionally by the interpreter, so the rename is backward-compatible.

    The main loop checks ``interrupted`` after each job finishes, so the
    current Spark job is allowed to complete before the process exits.
    """
    global interrupted
    interrupted = True

if __name__ == "__main__":
    # Register the SIGINT handler before starting any work so Ctrl-C /
    # container stop requests set the flag instead of raising KeyboardInterrupt
    # mid-job.
    signal.signal(signal.SIGINT, signal_handler)
    # Plain module-level assignment; a `global` statement here would be a
    # no-op since this code is not inside a function.
    interrupted = False
    # Run jobs indefinitely so the container stays alive long enough for the
    # metrics collector to scrape it; exit only once a SIGINT has been
    # observed, letting the in-flight job finish first.
    while True:
        main()
        if interrupted:
            break

0 comments on commit b4d1a4c

Please sign in to comment.