MAINT: align examples with isort and black (intel#1386)
apply isort and black fixes for examples
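
For context, this is roughly the normalization the two tools perform. A minimal sketch using the Python APIs of isort and black; the repository's exact configuration (line length, import sections) is an assumption here:

    import black
    import isort

    # Toy source in the pre-commit style of these examples: stdlib and
    # third-party imports mixed together, single-quoted strings.
    src = (
        "import numpy as np\n"
        "import os\n"
        "\n"
        "def read(f):\n"
        "    return np.loadtxt(f, usecols=range(20), delimiter=',', ndmin=2)\n"
    )

    # isort groups the imports (stdlib before third-party); black then
    # normalizes quotes, spacing and wrapping at its default 88-column limit.
    formatted = black.format_str(isort.code(src, profile="black"), mode=black.Mode())
    print(formatted)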
samir-nasibli committed Aug 1, 2023
1 parent 8d4ad6f commit 156dfff
Showing 114 changed files with 1,827 additions and 1,308 deletions.
20 changes: 11 additions & 9 deletions examples/daal4py/adaboost_batch.py
@@ -1,4 +1,4 @@
#===============================================================================
# ===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,26 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# ===============================================================================

# daal4py Adaboost example for shared memory systems

import daal4py as d4p
import numpy as np

import daal4py as d4p

# let's try to use pandas' fast csv reader
try:
import pandas

def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
return pandas.read_csv(f, usecols=c, delimiter=",", header=None, dtype=t)

except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
return np.loadtxt(f, usecols=c, delimiter=",", ndmin=2)


def main(readcsv=read_csv, method='defaultDense'):
def main(readcsv=read_csv, method="defaultDense"):
infile = "./data/batch/adaboost_train.csv"
testfile = "./data/batch/adaboost_test.csv"
nClasses = 2
@@ -55,7 +57,7 @@ def main(readcsv=read_csv, method='defaultDense'):

# The prediction result provides prediction
assert predict_result.prediction.shape == (pdata.shape[0], dep_data.shape[1])
ptdata = np.loadtxt(testfile, usecols=range(20, 21), delimiter=',', ndmin=2)
ptdata = np.loadtxt(testfile, usecols=range(20, 21), delimiter=",", ndmin=2)
assert np.allclose(predict_result.prediction, ptdata)

return (train_result, predict_result, ptdata)
@@ -66,6 +68,6 @@ def main(readcsv=read_csv, method='defaultDense'):
print("\nGround truth (first 20 observations):\n", ptdata[:20])
print(
"Adaboost classification results: (first 20 observations):\n",
predict_result.prediction[:20]
predict_result.prediction[:20],
)
print('All looks good!')
print("All looks good!")
28 changes: 16 additions & 12 deletions examples/daal4py/adagrad_mse_batch.py
@@ -1,4 +1,4 @@
#===============================================================================
# ===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,27 +12,29 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# ===============================================================================

# daal4py AdaGrad (Adaptive Subgradient Method) example for shared memory systems
# using Mean Squared Error objective function

import daal4py as d4p
import numpy as np

import daal4py as d4p

# let's try to use pandas' fast csv reader
try:
import pandas

def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
return pandas.read_csv(f, usecols=c, delimiter=",", header=None, dtype=t)

except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
return np.loadtxt(f, usecols=c, delimiter=",", ndmin=2)


def main(readcsv=read_csv, method='defaultDense'):
def main(readcsv=read_csv, method="defaultDense"):
infile = "./data/batch/mse.csv"
# Read the data, let's have 3 independent variables
data = readcsv(infile, range(3))
@@ -46,11 +48,13 @@ def main(readcsv=read_csv, method='defaultDense'):
# configure an AdaGrad object
lr = np.array([[1.0]], dtype=np.double)
niters = 1000
sgd_algo = d4p.optimization_solver_adagrad(mse_algo,
learningRate=lr,
accuracyThreshold=0.0000001,
nIterations=niters,
batchSize=1)
sgd_algo = d4p.optimization_solver_adagrad(
mse_algo,
learningRate=lr,
accuracyThreshold=0.0000001,
nIterations=niters,
batchSize=1,
)

# finally do the computation
inp = np.array([[8], [2], [1], [4]], dtype=np.double)
@@ -66,4 +70,4 @@ def main(readcsv=read_csv, method='defaultDense'):
res = main()
print("\nMinimum:\n", res.minimum)
print("\nNumber of iterations performed:\n", res.nIterations[0][0])
print('All looks good!')
print("All looks good!")
18 changes: 10 additions & 8 deletions examples/daal4py/association_rules_batch.py
@@ -1,4 +1,4 @@
#===============================================================================
# ===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,26 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# ===============================================================================

# daal4py association rules example for shared memory systems

import daal4py as d4p
import numpy as np

import daal4py as d4p

# let's try to use pandas' fast csv reader
try:
import pandas

def read_csv(f, c=None, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
return pandas.read_csv(f, usecols=c, delimiter=",", header=None, dtype=t)

except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c=None, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
return np.loadtxt(f, usecols=c, delimiter=",", ndmin=2)


def main(readcsv=read_csv, method='defaultDense'):
def main(readcsv=read_csv, method="defaultDense"):
infile = "./data/batch/apriori.csv"

# configure an association_rules object
@@ -57,6 +59,6 @@ def main(readcsv=read_csv, method='defaultDense'):

if __name__ == "__main__":
result1 = main()
print('Confidence: (20 first)')
print("Confidence: (20 first)")
print(result1.confidence[0:20])
print('All looks good!')
print("All looks good!")
16 changes: 9 additions & 7 deletions examples/daal4py/bacon_outlier_batch.py
@@ -1,4 +1,4 @@
#===============================================================================
# ===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,26 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# ===============================================================================

# daal4py outlier detection bacon example for shared memory systems

import daal4py as d4p
import numpy as np

import daal4py as d4p

# let's try to use pandas' fast csv reader
try:
import pandas

def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
return pandas.read_csv(f, usecols=c, delimiter=",", header=None, dtype=t)

except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
return np.loadtxt(f, usecols=c, delimiter=",", ndmin=2)


def main(readcsv=read_csv, method='defaultDense'):
def main(readcsv=read_csv, method="defaultDense"):
# Input file
infile = "./data/batch/outlierdetection.csv"

@@ -55,4 +57,4 @@ def main(readcsv=read_csv, method='defaultDense'):

print("\nInput data\n", data)
print("\nOutlier detection result (Bacon method) weights:\n", res.weights)
print('All looks good!')
print("All looks good!")
24 changes: 13 additions & 11 deletions examples/daal4py/bf_knn_classification_batch.py
@@ -1,4 +1,4 @@
#===============================================================================
# ===============================================================================
# Copyright 2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,30 +12,33 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# ===============================================================================

# daal4py Brute Force KNN example for shared memory systems

import daal4py as d4p
import numpy as np
import os

import numpy as np

import daal4py as d4p

# let's try to use pandas' fast csv reader
try:
import pandas

def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
return pandas.read_csv(f, usecols=c, delimiter=",", header=None, dtype=t)

except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
return np.loadtxt(f, usecols=c, delimiter=",", ndmin=2)


def main(readcsv=read_csv, method='defaultDense'):
def main(readcsv=read_csv, method="defaultDense"):
# Input data set parameters
train_file = os.path.join('data', 'batch', 'k_nearest_neighbors_train.csv')
predict_file = os.path.join('data', 'batch', 'k_nearest_neighbors_test.csv')
train_file = os.path.join("data", "batch", "k_nearest_neighbors_train.csv")
predict_file = os.path.join("data", "batch", "k_nearest_neighbors_test.csv")

# Read data. Let's use 5 features per observation
nFeatures = 5
@@ -69,6 +72,5 @@ def main(readcsv=read_csv, method='defaultDense'):
print("Brute Force kNN classification results:")
print("Ground truth(observations #30-34):\n", predict_labels[30:35])
print(
"Classification results(observations #30-34):\n",
predict_result.prediction[30:35]
"Classification results(observations #30-34):\n", predict_result.prediction[30:35]
)
20 changes: 11 additions & 9 deletions examples/daal4py/brownboost_batch.py
@@ -1,4 +1,4 @@
#===============================================================================
# ===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,26 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# ===============================================================================

# daal4py Brownboost example for shared memory systems

import daal4py as d4p
import numpy as np

import daal4py as d4p

# let's try to use pandas' fast csv reader
try:
import pandas

def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
return pandas.read_csv(f, usecols=c, delimiter=",", header=None, dtype=t)

except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
return np.loadtxt(f, usecols=c, delimiter=",", ndmin=2)


def main(readcsv=read_csv, method='defaultDense'):
def main(readcsv=read_csv, method="defaultDense"):
infile = "./data/batch/brownboost_train.csv"
testfile = "./data/batch/brownboost_test.csv"

@@ -54,7 +56,7 @@ def main(readcsv=read_csv, method='defaultDense'):

# The prediction result provides prediction
assert predict_result.prediction.shape == (pdata.shape[0], dep_data.shape[1])
ptdata = np.loadtxt(testfile, usecols=range(20, 21), delimiter=',', ndmin=2)
ptdata = np.loadtxt(testfile, usecols=range(20, 21), delimiter=",", ndmin=2)
assert np.allclose(predict_result.prediction, ptdata)

return (train_result, predict_result, ptdata)
@@ -65,6 +67,6 @@ def main(readcsv=read_csv, method='defaultDense'):
print("\nGround truth (first 20 observations):\n", ptdata[:20])
print(
"Brownboost classification results: (first 20 observations):\n",
predict_result.prediction[:20]
predict_result.prediction[:20],
)
print('All looks good!')
print("All looks good!")
16 changes: 9 additions & 7 deletions examples/daal4py/cholesky_batch.py
@@ -1,4 +1,4 @@
#===============================================================================
# ===============================================================================
# Copyright 2014 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,26 +12,28 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# ===============================================================================

# daal4py cholesky example for shared memory systems

import daal4py as d4p
import numpy as np

import daal4py as d4p

# let's try to use pandas' fast csv reader
try:
import pandas

def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
return pandas.read_csv(f, usecols=c, delimiter=",", header=None, dtype=t)

except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2)
return np.loadtxt(f, usecols=c, delimiter=",", ndmin=2)


def main(readcsv=read_csv, method='defaultDense'):
def main(readcsv=read_csv, method="defaultDense"):
infile = "./data/batch/cholesky.csv"

# configure a cholesky object
@@ -45,4 +47,4 @@ def main(readcsv=read_csv, method='defaultDense'):
if __name__ == "__main__":
result = main()
print("\nFactor:\n", result.choleskyFactor)
print('All looks good!')
print("All looks good!")