Commit

Merge branch 'nextjs' into feature-841

codingwithsurya authored Jun 28, 2023
2 parents 706db69 + c6c0b19 commit 7603c53
Showing 7 changed files with 434 additions and 3 deletions.
6 changes: 3 additions & 3 deletions frontend/src/pages/wiki.tsx
@@ -182,7 +182,7 @@ const Wiki = () => {
           <Image
             src={dropout_dg}
             alt="Dropout diagram"
-            style={{ maxHeight: 200, marginInline: "auto" }}
+            style={{ maxHeight: 200, maxWidth: 400, marginInline: "auto" }}
           />

           <h5>Documentation</h5>
@@ -312,7 +312,7 @@ const Wiki = () => {
           <Image
             src={sigmoid_eq}
             alt="Sigmoid plot"
-            style={{ maxHeight: 300, marginInline: "auto" }}
+            style={{ maxHeight: 300, maxWidth: 500, marginInline: "auto" }}
           />

           <h5>Documentation</h5>
@@ -361,7 +361,7 @@ const Wiki = () => {
           <Image
             src={softmax_eq}
             alt="Softmax equation"
-            style={{ maxHeight: 200, marginInline: "auto" }}
+            style={{ maxHeight: 200, maxWidth: 500, marginInline: "auto" }}
           />

           <p>
10 changes: 10 additions & 0 deletions openapi/openapi.yaml
@@ -33,12 +33,22 @@ paths:
     $ref: paths/s3/getSignedUploadUrl.yaml
   /api/s3/getUserDatasetFilesData:
     $ref: paths/s3/getUserDatasetFilesData.yaml
+  /api/s3/getUserDatasetFileUploadPresignedPostObj:
+    $ref: paths/s3/getUserDatasetFileUploadPresignedPostObj.yaml
   /api/test:
     $ref: paths/test/none.yaml
   /api/trainspace/create-trainspace:
     $ref: paths/trainspace/create-trainspace.yaml
   /api/trainspace/getTrainspaceData:
     $ref: paths/trainspace/getTrainspaceData.yaml
+  /api/trainspace/getUserProgressData:
+    $ref: paths/trainspace/getUserProgressData.yaml
+  /api/trainspace/updateOneUserProgressData:
+    $ref: paths/trainspace/updateOneUserProgressData.yaml
+  /api/train/img-run:
+    $ref: paths/train/img-run.yaml
+  /api/train/tabular-run:
+    $ref: paths/train/tabular-run.yaml
 # components:
 #   securitySchemes:
 #     main_auth:
61 changes: 61 additions & 0 deletions openapi/paths/s3/getUserDatasetFileUploadPresignedPostObj.yaml
@@ -0,0 +1,61 @@
post:
  summary: Get User Dataset File Upload Presigned Post Object
  description: API endpoint that returns an S3 presigned POST object for uploading a dataset file to S3, given the user ID, the data source, and the file name.
  tags:
    - s3
  requestBody:
    content:
      application/json:
        schema:
          type: object
          required:
            - uid
            - data_source
            - name
          properties:
            uid:
              type: string
              description: The unique user ID
            data_source:
              type: string
              description: "The type of training the user is running (e.g. TABULAR, PRETRAINED, OBJECT_DETECTION, IMAGE)"
            name:
              type: string
              description: The dataset file name
          example:
            uid: "1234"
            data_source: "TABULAR"
            name: "data.csv"
    required: true
  responses:
    "200":
      description: Presigned POST object fetched successfully
      content:
        application/json:
          schema:
            type: object
            properties:
              message:
                type: string
                example: "Presigned URL fetch successful"
              presigned_post_obj:
                type: string
                example: "{'url': 'https://cloudberry-examples.s3.amazonaws.com/',
                  'fields': {
                  'key': 'uploads/image.jpg',
                  'AWSAccessKeyId': 'AKIALGKOKBY37F5FZF4I',
                  'policy': 'eyJleHBpcmF0aW9uIjogIjIwMTgtMDctMTNUMDI6Mzg6MTBaIiwgImNvbmRpdGlvbnMiOiBbWyJjb250ZW50LWxlbmd0aC1yYW5nZSIsIDEsIDEwNDg1NzldLCB7ImJ1Y2tldCI6ICJjbG91ZGJlcnJ5LWV4YW1wbGVzIn0sIHsia2V5IjogInVwbG9hZHMvaW1hZ2UuanBnIn1dfQ==',
                  'signature': 'ZY7Orehfdzg+ToJJXhYuV/XyK5o='}}"
    "400":
      description: Presigned URL fetch failed. This usually indicates a problem with the request.
    "401":
      description: User is not authenticated
      content:
        application/json:
          schema:
            type: object
            properties:
              error:
                type: string
                example: "User is not authenticated"
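
For orientation, here is a minimal client-side sketch (not part of this commit) of how the endpoint might be used: fetch the presigned POST object, then upload the file directly to S3 with it. The base URL is a placeholder, and parsing presigned_post_obj with ast.literal_eval assumes it arrives as the Python-dict-style string shown in the example above.

import ast
import requests

BASE_URL = "http://localhost:8000"  # placeholder; adjust for your deployment

# Step 1: ask the backend for a presigned POST object.
resp = requests.post(
    f"{BASE_URL}/api/s3/getUserDatasetFileUploadPresignedPostObj",
    json={"uid": "1234", "data_source": "TABULAR", "name": "data.csv"},
)
resp.raise_for_status()

# The spec documents presigned_post_obj as a string whose example resembles a
# Python dict repr, so literal_eval is one way to recover it (an assumption).
presigned = ast.literal_eval(resp.json()["presigned_post_obj"])

# Step 2: upload the file straight to S3 using the presigned URL and fields.
with open("data.csv", "rb") as f:
    upload = requests.post(
        presigned["url"],
        data=presigned["fields"],
        files={"file": ("data.csv", f)},
    )
upload.raise_for_status()
print("Upload complete:", upload.status_code)
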
132 changes: 132 additions & 0 deletions openapi/paths/train/img-run.yaml
@@ -0,0 +1,132 @@
post:
  summary: Image Run
  description: API endpoint to train an image model via PyTorch.
  tags:
    - train
  requestBody:
    content:
      application/json:
        schema:
          type: object
          required:
            - uid
            - name
            - dataset_data
            - parameters_data
          properties:
            uid:
              type: string
              description: User ID
            name:
              type: string
              description: Name of the trainspace
            dataset_data:
              type: object
              description: DatasetData instance
              example:
                name: "data.csv"
                is_default_dataset: false
              properties:
                name:
                  type: string
                  description: Name of the dataset file
                is_default_dataset:
                  type: boolean
                  description: Whether the dataset is a built-in dataset
            parameters_data:
              type: object
              properties:
                problem_type:
                  type: string
                  description: Whether this is a classification or regression problem
                criterion:
                  type: string
                  description: Loss function
                optimizer_name:
                  type: string
                  description: Optimizer the model should use during gradient descent
                  example: "SGD"
                shuffle:
                  type: boolean
                  description: Whether the data should be shuffled before training
                epochs:
                  type: integer
                  description: Number of epochs/iterations to train the model for
                test_size:
                  type: number
                  description: Fraction of the dataset held out for testing model performance
                batch_size:
                  type: integer
                  description: Number of samples per batch; training proceeds batch by batch within each epoch
                layers:
                  type: array
                  description: Architecture of the model as an ordered list of layers
                train_transform:
                  type: array
                  description: Image transformations to apply to the training data
                test_transform:
                  type: array
                  description: Image transformations to apply to the testing data
              required:
                - problem_type
                - criterion
                - optimizer_name
                - shuffle
                - epochs
                - test_size
                - batch_size
                - layers
                - train_transform
                - test_transform
          example:
            uid: "1234"
            name: "My trainspace"
            dataset_data:
              name: "data.zip"
              is_default_dataset: false
            parameters_data:
              problem_type: "classification"
              criterion: "CELOSS"
              optimizer_name: "ADAM"
              shuffle: true
              epochs: 10
              test_size: 0.2
              batch_size: 20
              layers:
                [
                  "nn.Conv2d(1, 5, 3, 1, 1)",
                  "nn.MaxPool2d(3, 1)",
                  "nn.Flatten()",
                  "nn.Linear(4500, 10)",
                  "nn.Sigmoid()",
                ]
              train_transform:
                [
                  "torchvision.transforms.Grayscale()",
                  "torchvision.transforms.ToTensor()",
                  "torchvision.transforms.Resize((32, 32))",
                ]
              test_transform:
                [
                  "torchvision.transforms.Grayscale()",
                  "torchvision.transforms.ToTensor()",
                  "torchvision.transforms.Resize((32, 32))",
                ]
    required: true
  responses:
    "200":
      description: Image DL model trained successfully. Shows the results page.
      content:
        application/json:
          schema:
            type: object
            properties:
              message:
                type: string
                example: "Training successful"
    "400":
      description: Error during model training. Could stem from a problem with the user's request or from the server side.
    "401":
      description: User is not authenticated
      content:
        application/json:
          schema:
            type: object
            properties:
              error:
                type: string
                example: "User is not authenticated"
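
For orientation, a minimal sketch (not part of this commit) of invoking this endpoint with the example payload above. The base URL is a placeholder and authentication is omitted; layers and transforms are sent as strings, which the backend is assumed to turn into the corresponding PyTorch/torchvision objects.

import requests

BASE_URL = "http://localhost:8000"  # placeholder; adjust for your deployment

transforms = [
    "torchvision.transforms.Grayscale()",
    "torchvision.transforms.ToTensor()",
    "torchvision.transforms.Resize((32, 32))",
]

payload = {
    "uid": "1234",
    "name": "My trainspace",
    "dataset_data": {"name": "data.zip", "is_default_dataset": False},
    "parameters_data": {
        "problem_type": "classification",
        "criterion": "CELOSS",
        "optimizer_name": "ADAM",
        "shuffle": True,
        "epochs": 10,
        "test_size": 0.2,
        "batch_size": 20,
        # With 32x32 grayscale input, the padded conv keeps 32x32 and the
        # 3x3 stride-1 max pool leaves 30x30, so Flatten yields 5*30*30 = 4500.
        "layers": [
            "nn.Conv2d(1, 5, 3, 1, 1)",
            "nn.MaxPool2d(3, 1)",
            "nn.Flatten()",
            "nn.Linear(4500, 10)",
            "nn.Sigmoid()",
        ],
        "train_transform": transforms,
        "test_transform": transforms,
    },
}

resp = requests.post(f"{BASE_URL}/api/train/img-run", json=payload)
print(resp.status_code, resp.json().get("message"))
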
129 changes: 129 additions & 0 deletions openapi/paths/train/tabular-run.yaml
@@ -0,0 +1,129 @@
post:
  summary: Tabular Run
  description: API endpoint to train a Deep Learning (DL) model on tabular datasets.
  tags:
    - train
  requestBody:
    content:
      application/json:
        schema:
          type: object
          required:
            - uid
            - name
            - dataset_data
            - parameters_data
          properties:
            uid:
              type: string
              description: User ID
            name:
              type: string
              description: Name of the trainspace
            dataset_data:
              type: object
              description: DatasetData instance
              example:
                name: "data.csv"
                is_default_dataset: false
              properties:
                name:
                  type: string
                  description: Name of the dataset file
                is_default_dataset:
                  type: boolean
                  description: Whether the dataset is a built-in dataset
            parameters_data:
              type: object
              properties:
                target_col:
                  type: string
                  description: Target column to predict/classify
                features:
                  type: array
                  description: Input columns to the model
                  items:
                    type: string
                problem_type:
                  type: string
                  description: Whether this is a classification or regression problem
                criterion:
                  type: string
                  description: Loss function to use
                optimizer_name:
                  type: string
                  description: Optimizer the model should use during gradient descent
                  example: "SGD"
                shuffle:
                  type: boolean
                  description: Whether the rows should be shuffled or their order maintained
                epochs:
                  type: integer
                  description: Number of epochs/iterations to train the model for
                test_size:
                  type: number
                  description: Fraction of the dataset held out for testing model performance
                batch_size:
                  type: integer
                  description: Number of samples per batch; training proceeds batch by batch within each epoch
                layers:
                  type: array
                  description: Architecture of the model as an ordered list of layers
              required:
                - target_col
                - features
                - problem_type
                - criterion
                - optimizer_name
                - shuffle
                - epochs
                - test_size
                - batch_size
                - layers
          example:
            uid: "1234"
            name: "My trainspace"
            dataset_data:
              name: "data.csv"
              is_default_dataset: false
            parameters_data:
              target_col: "target"
              features: ["col1", "col2", "col3"]
              problem_type: "classification"
              criterion: "CELOSS"
              optimizer_name: "SGD"
              shuffle: true
              epochs: 10
              test_size: 0.2
              batch_size: 20
              layers:
                [
                  "nn.Linear(3, 3)",
                  "nn.ReLU()",
                  "nn.Linear(3, 10)",
                  "nn.Softmax(-1)",
                ]
    required: true
  responses:
    "200":
      description: Training successful. Shows the results page.
      content:
        application/json:
          schema:
            type: object
            properties:
              message:
                type: string
                example: "Training successful"
    "400":
      description: Error during model training. Could stem from a problem with the user's request or from the server side.
    "401":
      description: User is not authenticated
      content:
        application/json:
          schema:
            type: object
            properties:
              error:
                type: string
                example: "User is not authenticated"
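
For orientation, a minimal sketch (not part of this commit) of invoking this endpoint with the example payload above. The base URL is a placeholder and authentication is omitted; note that the first Linear layer takes one input per selected feature column.

import requests

BASE_URL = "http://localhost:8000"  # placeholder; adjust for your deployment

payload = {
    "uid": "1234",
    "name": "My trainspace",
    "dataset_data": {"name": "data.csv", "is_default_dataset": False},
    "parameters_data": {
        "target_col": "target",
        "features": ["col1", "col2", "col3"],
        "problem_type": "classification",
        "criterion": "CELOSS",
        "optimizer_name": "SGD",
        "shuffle": True,
        "epochs": 10,
        "test_size": 0.2,
        "batch_size": 20,
        # Three feature columns, so the first Linear layer has 3 inputs.
        "layers": [
            "nn.Linear(3, 3)",
            "nn.ReLU()",
            "nn.Linear(3, 10)",
            "nn.Softmax(-1)",
        ],
    },
}

resp = requests.post(f"{BASE_URL}/api/train/tabular-run", json=payload)
print(resp.status_code, resp.json().get("message"))
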