This repository has been archived by the owner on Oct 20, 2022. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 3
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
- Loading branch information
Showing
10 changed files
with
2,088 additions
and
0 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
# Project own folders | ||
saved_models/ | ||
logs/ | ||
log/ | ||
out/ | ||
data/ | ||
results/ | ||
**/job.sh | ||
job.sbatch | ||
slurm* | ||
|
||
# python cache | ||
__pycache__/ | ||
__pycache__/* | ||
|
||
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm | ||
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839 | ||
.idea/ | ||
|
||
# User-specific stuff | ||
.idea/**/workspace.xml | ||
.idea/**/tasks.xml | ||
.idea/**/usage.statistics.xml | ||
.idea/**/dictionaries | ||
.idea/**/shelf | ||
|
||
# Generated files | ||
.idea/**/contentModel.xml | ||
|
||
# Sensitive or high-churn files | ||
.idea/**/dataSources/ | ||
.idea/**/dataSources.ids | ||
.idea/**/dataSources.local.xml | ||
.idea/**/sqlDataSources.xml | ||
.idea/**/dynamic.xml | ||
.idea/**/uiDesigner.xml | ||
.idea/**/dbnavigator.xml | ||
|
||
# Gradle | ||
.idea/**/gradle.xml | ||
.idea/**/libraries | ||
|
||
# Gradle and Maven with auto-import | ||
# When using Gradle or Maven with auto-import, you should exclude module files, | ||
# since they will be recreated, and may cause churn. Uncomment if using | ||
# auto-import. | ||
# .idea/modules.xml | ||
# .idea/*.iml | ||
# .idea/modules | ||
# *.iml | ||
# *.ipr | ||
|
||
# CMake | ||
cmake-build-*/ | ||
|
||
# Mongo Explorer plugin | ||
.idea/**/mongoSettings.xml | ||
|
||
# File-based project format | ||
*.iws | ||
|
||
# IntelliJ | ||
out/ | ||
|
||
# mpeltonen/sbt-idea plugin | ||
.idea_modules/ | ||
|
||
# JIRA plugin | ||
atlassian-ide-plugin.xml | ||
|
||
# Cursive Clojure plugin | ||
.idea/replstate.xml | ||
|
||
# Crashlytics plugin (for Android Studio and IntelliJ) | ||
com_crashlytics_export_strings.xml | ||
crashlytics.properties | ||
crashlytics-build.properties | ||
fabric.properties | ||
|
||
# Editor-based Rest Client | ||
.idea/httpRequests | ||
|
||
# Android studio 3.1+ serialized cache file | ||
.idea/caches/build_file_checksums.ser | ||
|
||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,43 @@ | ||
import scipy.io | ||
import os | ||
from os import path | ||
from pathlib import Path | ||
import sys | ||
sys.path.insert(1, "../") | ||
from dataset_reader import PolsarDatasetHandler | ||
|
||
# Locations of the Oberpfaffenhofen dataset assets.  Each path can be
# overridden through an environment variable so that other machines do not
# need to edit this file; the defaults preserve the original hard-coded
# locations and keep the module backward compatible.
labels_path = os.environ.get(
    "OBER_LABELS_PATH",
    '/media/barrachina/data/datasets/PolSar/Oberpfaffenhofen/Label_Germany.mat')
t_path = os.environ.get(
    "OBER_T6_PATH",
    '/media/barrachina/data/datasets/PolSar/Oberpfaffenhofen/ESAR_Oberpfaffenhofen_T6/Master_Track_Slave_Track/T6')
s_path = os.environ.get(
    "OBER_S_PATH",
    '/media/barrachina/data/datasets/PolSar/Oberpfaffenhofen/ESAR_Oberpfaffenhofen')

# Fail fast at import time if any dataset asset is missing on this machine.
if not all(os.path.exists(p) for p in (labels_path, t_path, s_path)):
    raise FileNotFoundError("No path found for Oberpfaffenhofen dataset")
|
||
|
||
class OberpfaffenhofenDataset(PolsarDatasetHandler):
    """Dataset handler for the ESAR Oberpfaffenhofen scene.

    Always works in coherency-matrix ("t") mode; the dataset root is the
    folder containing the label ``.mat`` file.
    """

    def __init__(self, *args, **kwargs):
        # Root path and name/mode are fixed for this scene; everything else
        # is forwarded to the base handler untouched.
        super().__init__(*args, root_path=os.path.dirname(labels_path),
                         name="OBER", mode="t", **kwargs)

    def print_ground_truth(self, t=None, *args, **kwargs):
        """Render the ground truth, loading the T6 image when none is given."""
        image = self.get_image() if t is None else t
        out_path = Path(os.path.dirname(labels_path)) / "ground_truth.png"
        super().print_ground_truth(*args, t=image, path=out_path, **kwargs)

    def get_image(self):
        """Load the coherency-matrix (T6) image from disk."""
        return self.open_t_dataset_t3(t_path)

    def get_sparse_labels(self):
        """Return the sparse (NOT one-hot) label map from the .mat file."""
        return scipy.io.loadmat(labels_path)['label']
|
||
|
||
def _smoke_test():
    """Run three representative get_dataset configurations back to back."""
    print("First Test")
    OberpfaffenhofenDataset().get_dataset(method="random", size=128, stride=25, pad="same")
    print("First one done")
    OberpfaffenhofenDataset(classification=True).get_dataset(method="random", size=12, stride=1, pad="same")
    print("Second one done")
    OberpfaffenhofenDataset(classification=True).get_dataset(method="random", size=1, stride=1, pad="same")


if __name__ == "__main__":
    _smoke_test()
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,2 +1,105 @@ | ||
# PolSAR CVNN | ||
PolSAR classification / segmentation using complex-valued neural networks. | ||
|
||
## Code usage | ||
|
||
1. Install all dependencies including [cvnn](https://pypi.org/project/cvnn/). | ||
2. Clone this repository | ||
3. Run the file `principal_simulation.py`. Optional parameters go as follows: | ||
``` | ||
usage: principal_simulation.py [-h] [--dataset_method DATASET_METHOD] | ||
[--tensorflow] [--epochs EPOCHS] | ||
[--model MODEL] [--early_stop [EARLY_STOP]] | ||
[--balance BALANCE] [--real_mode [REAL_MODE]] | ||
[--dropout DROPOUT DROPOUT DROPOUT] | ||
[--coherency] [--dataset DATASET] | ||
optional arguments: | ||
-h, --help show this help message and exit | ||
--dataset_method DATASET_METHOD | ||
One of: | ||
- random (default): randomly select the train and val set | ||
- separate: split first the image into sections and select the sets from there | ||
- single_separated_image: as separate, but do not apply the sliding window operation | ||
(no batches, only one image per set). | ||
Only possible with segmentation models | ||
--tensorflow Use tensorflow library | ||
--epochs EPOCHS (int) epochs to be done | ||
--model MODEL deep model to be used. Options: | ||
- fcnn | ||
- cnn | ||
- mlp | ||
- 3d-cnn | ||
--early_stop [EARLY_STOP] | ||
Apply early stopping to training | ||
--balance BALANCE Deal with unbalanced dataset by: | ||
- loss: weighted loss | ||
- dataset: balance dataset by randomly remove pixels of predominant classes | ||
- any other string will be considered as not balanced | ||
--real_mode [REAL_MODE] | ||
run real model instead of complex. | ||
If [REAL_MODE] is used it should be one of: | ||
- real_imag | ||
- amplitude_phase | ||
- amplitude_only | ||
- real_only | ||
--dropout DROPOUT DROPOUT DROPOUT | ||
dropout rate to be used on downsampling, bottleneck, upsampling sections (in order). Example: `python main.py --dropout 0.1 None 0.3` will use 10% dropout on the downsampling part and 30% on the upsampling part and no dropout on the bottleneck. | ||
--coherency Use coherency matrix instead of s | ||
--dataset DATASET dataset to be used. Available options: | ||
- SF-AIRSAR | ||
- SF-RS2 | ||
- OBER | ||
Process finished with exit code 0 | ||
``` | ||
4. Once simulations are done the program will create a folder inside `log/<date>/run-<time>/` that will contain the following information: | ||
- `tensorboard`: Files to be visualized with [tensorboard](https://www.tensorflow.org/tensorboard). | ||
- `checkpoints`: Saved model weights of the lowest validation loss obtained. | ||
- `prediction.png`: Image with the predicted image of the best model. | ||
- `model_summary.txt`: Information about the simulation done. | ||
- `history_dict.csv`: The dictionary of all loss and metrics over epoch obtained as a return of [`Model.fit()`](https://www.tensorflow.org/api_docs/python/tf/keras/Model#fit). | ||
- `<dataset>_confusion_matrix.csv`: Confusion matrices for different datasets. | ||
- `evaluate.csv`: Loss and all metrics for all datasets and full image. | ||
|
||
|
||
## Datasets | ||
|
||
### San Francisco | ||
|
||
1. Download the San Francisco dataset. The labels and images are well described in [this](https://arxiv.org/abs/1912.07259) paper. It is important that the format of the folder copies the structure of [this](https://github.com/liuxuvip/PolSF) repository. | ||
2. Change the `root_path` to the path where the dataset was downloaded in the file `San Francisco/sf_data_reader.py` | ||
|
||
### Oberpfaffenhofen | ||
|
||
1. Download labels from [this repository](https://github.com/fudanxu/CV-CNN/blob/master/Label_Germany.mat) | ||
2. Download image from the [European Space Agency (esa) website](https://step.esa.int/main/toolboxes/polsarpro-v6-0-biomass-edition-toolbox/) | ||
3. Change the `root_path` to the path where the dataset was downloaded in the file `Oberpfaffenhofen/oberpfaffenhofen_dataset.py` | ||
|
||
### Own dataset | ||
|
||
For using your own dataset: | ||
|
||
1. create a new class that inherits from `PolsarDatasetHandler`. Two methods (at least) should be created. | ||
- `get_image`: Return a numpy array of the 3D image (height, width, channels), channels are usually complex-valued and in the form of coherency matrix or pauli vector representation. | ||
- `get_sparse_labels`: Returns an array with the labels in sparse mode (NOT one-hot encoded). | ||
2. Inside `principal_simulation.py` | ||
- Import your class. | ||
- Add your dataset metadata into `DATASET_META`. | ||
- Add your dataset into `_get_dataset_handler`. | ||
|
||
## Models | ||
|
||
Currently, the following models are supported: | ||
|
||
- FCNN from [Cao et al.](https://www.mdpi.com/2072-4292/11/22/2653) | ||
- CNN from [Zhang et al.](https://ieeexplore.ieee.org/abstract/document/8039431), later used to some extent in [Sun et al.](https://ieeexplore.ieee.org/abstract/document/8809406); [Zhao et al.](https://ieeexplore.ieee.org/abstract/document/8900150); [Qin et al.](https://ieeexplore.ieee.org/abstract/document/9296798) | ||
- MLP from Haensch et al. present in all these papers: [1](https://www.ingentaconnect.com/content/asprs/pers/2010/00000076/00000009/art00008); [2](https://ieeexplore.ieee.org/abstract/document/5758871); [3](https://www.isprs.org/proceedings/xxxviii/1_4_7-W5/paper/Haensch-147.pdf) | ||
- 3D-CNN from [Tan et al.](https://ieeexplore.ieee.org/abstract/document/8864110) | ||
|
||
To create your own model it suffices to: | ||
|
||
1. Create your own `Tensorflow` model (using `cvnn` if needed) and create a function or class that returns it (already compiled). | ||
2. Add it to `_get_model` inside `principal_simulation.py` | ||
3. Add your model name to `MODEL_META` to be able to call the script with your new model parameter. |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,62 @@ | ||
import numpy as np | ||
from imageio import imread | ||
from pathlib import Path | ||
from os import path | ||
from typing import Tuple | ||
from pdb import set_trace | ||
import matplotlib.pyplot as plt | ||
import tensorflow as tf | ||
import sys | ||
|
||
# For download references use https://arxiv.org/abs/1912.07259

# TODO: INSERT HERE THE PATH TO San Francisco dataset
root_path = "/media/barrachina/data/datasets/PolSar/San Francisco/PolSF"

# dataset_reader lives one directory up, hence this sys.path tweak.
sys.path.insert(1, "../")
# Fail fast at import time when the dataset is not present on this machine.
if not path.exists(root_path):
    raise FileNotFoundError("path of the san francisco dataset not found")
from dataset_reader import labels_to_rgb, SF_COLORS, PolsarDatasetHandler
|
||
|
||
# Pixel crop windows (x1, y1)-(x2, y2) selecting the labeled region of each
# San Francisco product; `y_inverse` marks products that must be flipped
# vertically after cropping (see get_image below).
AVAILABLE_IMAGES = {
    "SF-AIRSAR": {"x1": 0, "y1": 0, "x2": 1024, "y2": 900, "y_inverse": False},
    "SF-ALOS2": {"x1": 736, "y1": 2832, "x2": 3520, "y2": 7888, "y_inverse": True},
    # "SF-GF3": {"x1": 1144, "y1": 3464, "x2": 3448, "y2": 6376, "y_inverse": True},
    "SF-RS2": {"x1": 661, "y1": 7326, "x2": 2041, "y2": 9126, "y_inverse": False},
    # "SF-RISAT": {"x1": 2486, "y1": 4257, "x2": 7414, "y2": 10648, "y_inverse": False},  # RISAT is not Pol
}
|
||
|
||
class SanFranciscoDataset(PolsarDatasetHandler):
    """Handler for the PolSF San Francisco crops (keys of AVAILABLE_IMAGES)."""

    def __init__(self, dataset_name: str, mode: str, *args, **kwargs):
        dataset_name = dataset_name.upper()
        assert dataset_name in AVAILABLE_IMAGES, f"Unknown data {dataset_name}."
        super().__init__(*args, root_path=str(Path(root_path) / dataset_name),
                         name=dataset_name, mode=mode, **kwargs)

    def print_ground_truth(self, t=None, *args, **kwargs):
        """Print the ground truth; load the image only in coherency mode."""
        if t is None and self.mode == "t":
            t = self.get_image()
        super().print_ground_truth(*args, t=t, **kwargs)

    def get_sparse_labels(self):
        """Load the sparse label map (<name>-label2d.png) as an array."""
        return imread(Path(root_path) / self.name / (self.name + "-label2d.png"))

    def get_image(self, save_image: bool = False) -> np.ndarray:
        """Open the product, crop it to its labeled window, and flip it
        vertically when the product is stored upside down.

        NOTE(review): `save_image` is unused in this implementation —
        confirm whether the base-class interface requires it.
        """
        folder = "SAN_FRANCISCO_" + self.name[3:]
        base = Path(root_path) / self.name / folder
        if self.mode == "s":
            data = self.open_s_dataset(str(base))
        elif self.mode == "t":
            data = self.open_t_dataset_t3(str(base / "T4"))
        else:
            raise ValueError(f"Mode {self.mode} not supported.")
        window = AVAILABLE_IMAGES[self.name]
        cropped = data[window["y1"]:window["y2"], window["x1"]:window["x2"]]
        return np.flip(cropped, axis=0) if window["y_inverse"] else cropped
Oops, something went wrong.