|
16 | 16 | import subprocess
|
17 | 17 |
|
18 | 18 |
|
19 |
| -OUTPUT_IMAGE_FILE = str(Config.MODEL_DIR / "prediction" / f"{Config.OUTPUT_NAME}.TFRecord") |
20 |
| -if not os.path.exists(str(Config.MODEL_DIR / "prediction")): os.mkdir(str(Config.MODEL_DIR / "prediction")) |
| 19 | +config_file = "config.env" |
| 20 | +config = Config(config_file) |
| 21 | + |
| 22 | +OUTPUT_IMAGE_FILE = str(config.MODEL_DIR / "prediction" / f"{config.OUTPUT_NAME}.TFRecord") |
| 23 | +if not os.path.exists(str(config.MODEL_DIR / "prediction")): os.mkdir(str(config.MODEL_DIR / "prediction")) |
21 | 24 | print(f"OUTPUT_IMAGE_FILE: {OUTPUT_IMAGE_FILE}")
|
22 | 25 |
|
23 |
| -OUTPUT_GCS_PATH = f"gs://{Config.GCS_BUCKET}/prediction/{Config.OUTPUT_NAME}.TFRecord" |
| 26 | +OUTPUT_GCS_PATH = f"gs://{config.GCS_BUCKET}/prediction/{config.OUTPUT_NAME}.TFRecord" |
24 | 27 | print(f"OUTPUT_GCS_PATH: {OUTPUT_GCS_PATH}")
|
25 | 28 |
|
26 |
| -ls = f"sudo gsutil ls gs://{Config.GCS_BUCKET}/{Config.GCS_IMAGE_DIR}/" |
| 29 | +ls = f"sudo gsutil ls gs://{config.GCS_BUCKET}/{config.GCS_IMAGE_DIR}/" |
27 | 30 | print(f"ls >> : {ls}")
|
28 | 31 | files_list = subprocess.check_output(ls, shell=True)
|
29 | 32 | files_list = files_list.decode("utf-8")
|
30 | 33 | files_list = files_list.split("\n")
|
31 | 34 |
|
32 | 35 | # Get only the files generated by the image export.
|
33 |
| -exported_files_list = [s for s in files_list if Config.GCS_IMAGE_PREFIX in s] |
| 36 | +exported_files_list = [s for s in files_list if config.GCS_IMAGE_PREFIX in s] |
34 | 37 |
|
35 | 38 | print(f"exported_files_list: {exported_files_list}")
|
36 | 39 |
|
|
51 | 54 | print(f"json_file: {json_file}")
|
52 | 55 |
|
53 | 56 | if Config.USE_BEST_MODEL_FOR_INFERENCE:
|
54 |
| - print(f"Using best model for inference.\nLoading model from {str(Config.MODEL_DIR)}/{Config.MODEL_CHECKPOINT_NAME}.tf") |
55 |
| - this_model = tf.keras.models.load_model(f"{str(Config.MODEL_DIR)}/{Config.MODEL_CHECKPOINT_NAME}.tf") |
| 57 | + print(f"Using best model for inference.\nLoading model from {str(config.MODEL_DIR)}/{config.MODEL_CHECKPOINT_NAME}.tf") |
| 58 | + this_model = tf.keras.models.load_model(f"{str(config.MODEL_DIR)}/{config.MODEL_CHECKPOINT_NAME}.tf") |
56 | 59 | else:
|
57 |
| - print(f"Using last model for inference.\nLoading model from {str(Config.MODEL_DIR)}/trained-model") |
58 |
| - this_model = tf.keras.models.load_model(f"{str(Config.MODEL_DIR)}/trained-model") |
| 60 | + print(f"Using last model for inference.\nLoading model from {str(config.MODEL_DIR)}/trained-model") |
| 61 | + this_model = tf.keras.models.load_model(f"{str(config.MODEL_DIR)}/trained-model") |
59 | 62 |
|
60 | 63 | print(this_model.summary())
|
61 | 64 |
|
|
# One flat column per pixel of the output patch.
patch_dimensions_flat = [patch_width * patch_height, 1]

# Get set up for prediction. Exported patches carry half the kernel buffer on
# each side, so the parsed raster grows by the full buffer per dimension and
# the prediction is later cropped back by (x_buffer, y_buffer).
if config.KERNEL_BUFFER:
    x_buffer, y_buffer = config.KERNEL_BUFFER[0] // 2, config.KERNEL_BUFFER[1] // 2
    buffered_shape = [
        config.PATCH_SHAPE[0] + config.KERNEL_BUFFER[0],
        config.PATCH_SHAPE[1] + config.KERNEL_BUFFER[1],
    ]
else:
    # No buffering: parse patches at their native shape.
    x_buffer = y_buffer = 0
    buffered_shape = config.PATCH_SHAPE
89 | 92 |
|
90 |
# Optional feature groups. config.FEATURES is extended IN PLACE so every later
# consumer (parsing spec below, tensor stacking in toTupleImage) sees the same
# band names in the same order.
if config.USE_ELEVATION:
    config.FEATURES.extend(["elevation", "slope"])

if config.USE_S1:
    config.FEATURES.extend(["vv_asc_before", "vh_asc_before", "vv_asc_during", "vh_asc_during",
                            "vv_desc_before", "vh_desc_before", "vv_desc_during", "vh_desc_during"])

print(f"Config.FEATURES: {config.FEATURES}")

# Every band is parsed as a fixed-size float32 raster of the buffered patch
# shape. `_` replaces the previously unused loop variable `k` — the feature
# spec is identical for every band.
image_columns = [
    tf.io.FixedLenFeature(shape=buffered_shape, dtype=tf.float32) for _ in config.FEATURES
]

# band name -> parsing spec, in FEATURES order.
image_features_dict = dict(zip(config.FEATURES, image_columns))
105 | 108 |
|
def parse_image(example_proto):
    """Deserialize one serialized Example into the per-band feature dict."""
    parsed = tf.io.parse_single_example(example_proto, image_features_dict)
    return parsed
|
108 | 111 |
|
def toTupleImage(inputs):
    """Stack the parsed per-band dict into a single channels-last tensor.

    Bands are taken in config.FEATURES order so the channel axis matches the
    parsing spec.
    """
    bands = [inputs.get(name) for name in config.FEATURES]
    image = tf.stack(bands, axis=0)        # (channels, H, W)
    return tf.transpose(image, [1, 2, 0])  # -> (H, W, channels)
|
@@ -139,8 +142,8 @@ def toTupleImage(inputs):
|
139 | 142 | print(f"Writing patch {i}...")
|
140 | 143 |
|
141 | 144 | prediction_patch = prediction_patch[
|
142 |
| - x_buffer: x_buffer+Config.PATCH_SHAPE[0], |
143 |
| - y_buffer: y_buffer+Config.PATCH_SHAPE[1] |
| 145 | + x_buffer: x_buffer+config.PATCH_SHAPE[0], |
| 146 | + y_buffer: y_buffer+config.PATCH_SHAPE[1] |
144 | 147 | ]
|
145 | 148 |
|
146 | 149 | example = tf.train.Example(
|
@@ -181,6 +184,6 @@ def toTupleImage(inputs):
|
print(f"uploading classified image to earth engine: {result}")

# upload to earth engine asset
# (string is assembled in pieces but is byte-identical to the single f-string form)
upload_image = (
    "earthengine upload image "
    f"--asset_id={config.EE_OUTPUT_ASSET}/{config.OUTPUT_NAME} "
    f"--pyramiding_policy=mode {OUTPUT_GCS_PATH} {json_file}"
)
result = subprocess.check_output(upload_image, shell=True)
print(f"uploading classified image to earth engine: {result}")
|
0 commit comments