From 3768d4bf2a3e181a0e8e4aac4891b8adfc946a3e Mon Sep 17 00:00:00 2001
From: Zeref996 <825276847@qq.com>
Date: Thu, 2 Jan 2025 02:38:30 +0000
Subject: [PATCH] plt update torch, test=model

---
 .../e2e/PaddleLT_new/engine/torch_eval.py     |   6 +-
 .../e2e/PaddleLT_new/engine/torch_train.py    | 411 ++++++++++++++++++
 .../PaddleLT_new/generator/builder_data.py    |   4 +
 .../math_extreme_size/abs_giant_size_func.py  |   4 +-
 framework/e2e/PaddleLT_new/layertest.py       |   6 +-
 .../e2e/PaddleLT_new/strategy/compare.py      |   2 +
 .../math_extreme_size/abs_giant_size_func.py  |   4 +-
 .../yaml/dy_eval^torch_dy_eval.yml            |   2 +
 .../yaml/dy_train^torch_dy_train.yml          |  36 ++
 9 files changed, 468 insertions(+), 7 deletions(-)
 create mode 100644 framework/e2e/PaddleLT_new/engine/torch_train.py
 create mode 100644 framework/e2e/PaddleLT_new/yaml/dy_train^torch_dy_train.yml

diff --git a/framework/e2e/PaddleLT_new/engine/torch_eval.py b/framework/e2e/PaddleLT_new/engine/torch_eval.py
index f609c182aa..744d825a97 100644
--- a/framework/e2e/PaddleLT_new/engine/torch_eval.py
+++ b/framework/e2e/PaddleLT_new/engine/torch_eval.py
@@ -30,7 +30,11 @@ def __init__(self, testing, layerfile, device_place_id, upstream_net, orderdict_
         reset(self.seed)
 
         self.device = os.environ.get("PLT_SET_DEVICE")
-        torch.device(f"cuda:{device_place_id}")
+        # torch.device(f"cuda:{device_place_id}")
+        device = torch.device(f"cuda:{device_place_id}")
+        # device = torch.device('cuda:0')
+        # torch.cuda.set_device(device)
+        torch.set_default_device(device)
 
         self.testing = testing
         self.upstream_net = upstream_net
diff --git a/framework/e2e/PaddleLT_new/engine/torch_train.py b/framework/e2e/PaddleLT_new/engine/torch_train.py
new file mode 100644
index 0000000000..07be211cee
--- /dev/null
+++ b/framework/e2e/PaddleLT_new/engine/torch_train.py
@@ -0,0 +1,411 @@
+#!/bin/env python3
+# -*- coding: utf-8 -*-
+# @author Zeref996
+# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
+"""
+train method
+"""
+import os
+import numpy as np
+import torch
+from engine.torch_xtools import reset
+
+from generator.builder_layer import BuildLayer
+from generator.builder_data import BuildData
+from generator.builder_optimizer import BuildOptimizer
+from generator.builder_loss import BuildLoss
+
+from pltools.logger import Logger
+
+
+class LayerTrain(object):
+    """
+    Generic class for building Layer training runs
+    """
+
+    # def __init__(self, testing, layerfile, device_id):
+    def __init__(self, testing, layerfile, device_place_id, upstream_net, orderdict_usage="None"):
+        """
+        initialize
+        """
+        self.seed = 33
+        reset(self.seed)
+        self.device = os.environ.get("PLT_SET_DEVICE")
+        device = torch.device(f"cuda:{device_place_id}")
+        torch.set_default_device(device)
+        Logger("LayerTrain.__init__").get_log().info(f"device_place_id is: {device_place_id}")
+
+        self.testing = testing
+        self.upstream_net = upstream_net
+        self.return_net_instance = self.testing.get("return_net_instance", "False")
+        self.model_dtype = self.testing.get("model_dtype")
+        # torch.set_default_dtype(self.model_dtype)
+
+        self.layerfile = layerfile
+        self.step = self.testing.get("step")
+
+    def _unset_flags(self, engine_str="test_engine"):
+        """unset flags"""
+        if "FLAGS_enable_auto_recompute" in os.environ:
+            del os.environ["FLAGS_enable_auto_recompute"]
+            Logger(engine_str).get_log().info("environment variable FLAGS_enable_auto_recompute has been unset")
+
+    def _net_input(self):
+        """get input"""
+        reset(self.seed)
+        data = BuildData(layerfile=self.layerfile).get_single_data()
+        return data
+
+    def _net_instant(self):
+        """get net"""
+        reset(self.seed)
+        if self.upstream_net:
+            net = self.upstream_net
+        else:
+            net = BuildLayer(layerfile=self.layerfile).get_layer()
+        return net
+
+    def _net_optimizer(self):
+        """get optimizer"""
+        reset(self.seed)
+        optimizer_name = self.testing.get("optimizer").get("optimizer_name")
+        optimizer_param = self.testing.get("optimizer").get("params")
+        optimizer = BuildOptimizer(optimizer_name=optimizer_name, optimizer_param=optimizer_param)
+        return optimizer
+
+    def _net_loss(self):
+        """get loss"""
+        reset(self.seed)
+        loss_name = self.testing.get("Loss").get("loss_name")
+        loss_param = self.testing.get("Loss").get("params")
+        loss = BuildLoss(loss_name=loss_name, loss_param=loss_param)
+        return loss
+
+    # def _net_input_and_spec(self):
+    #     """get input and inputspec"""
+    #     reset(self.seed)
+    #     data, input_spec = BuildData(layerfile=self.layerfile).get_single_input_and_spec()
+    #     return data, input_spec
+
+    # def _net_input_and_static_spec(self):
+    #     """get input and static inputspec"""
+    #     reset(self.seed)
+    #     data, input_spec = BuildData(layerfile=self.layerfile).get_single_input_and_static_spec()
+    #     return data, input_spec
+
+    # def _net_input_and_multi_spec(self):
+    #     """get input and multi inputspec"""
+    #     reset(self.seed)
+    #     data, spec_gen = BuildData(layerfile=self.layerfile).get_single_input_and_multi_spec()
+    #     return data, spec_gen
+
+    def _get_data_grad(self, data):
+        """collect input.grad from each input in list[inputs...] into list[input.grad...]"""
+        data_grad = []
+        for i in data:
+            if isinstance(i, torch.Tensor):
+                data_grad.append(i.grad)
+        return data_grad
+
+    def dy_train(self):
+        """dygraph train"""
+        # data, net, optimizer, loss = self._get_instant()
+        data = self._net_input()
+        net = self._net_instant()
+        optimizer = self._net_optimizer()
+        loss = self._net_loss()
+
+        net.train()
+        # print(self.net.parameters())  # print the parameters
+
+        # build the optimizer for training
+        if list(net.parameters()):  # parameters() returns a generator in torch; materialize it to test for emptiness
+            opt = optimizer.get_opt(net=net)
+
+        for epoch in range(self.step):
+            logit = net(*data)
+            # build the loss for training
+            dy_loss = loss.get_loss(logit)
+            dy_loss.backward()
+            if list(net.parameters()):
+                opt.step()
+                opt.clear_grad()
+
+            Logger("dy_train").get_log().info(f"completed {epoch} training steps")
+        data_grad = self._get_data_grad(data)
+        if self.return_net_instance == "True":
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": net}
+        else:
+            return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
+
+    # def dy_dp_train(self):
+    #     """dygraph data parallel train"""
+    #     from torch.distributed import fleet
+
+    #     fleet.init(is_collective=True)
+
+    #     data = self._net_input()
+    #     net = self._net_instant()
+    #     dp_net = fleet.distributed_model(net)
+    #     optimizer = self._net_optimizer()
+    #     loss = self._net_loss()
+
+    #     net.train()
+
+    #     # build the optimizer for training
+    #     if net.parameters():
+    #         opt = optimizer.get_opt(net=net)
+    #         opt = fleet.distributed_optimizer(opt)
+
+    #     for epoch in range(self.step):
+    #         logit = dp_net(*data)
+    #         # build the loss for training
+    #         dy_loss = loss.get_loss(logit)
+    #         dy_loss.backward()
+    #         if dp_net.parameters():
+    #             opt.step()
+    #             opt.clear_grad()
+
+    #         Logger("dy_dp_train").get_log().info(f"completed {epoch} training steps")
+    #     data_grad = self._get_data_grad(data)
+    #     # return {"logit": logit, "data_grad": data_grad}
+    #     if self.return_net_instance == "True":
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": net}
+    #     else:
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
+
+    # def dy_train_dl(self):
+    #     """dygraph train with dataloader"""
+    #     reset(self.seed)
+
+    #     # net = self.net.get_layer()
+    #     self.net.train()
+
+    #     # build the optimizer for training
+    #     opt = self.optimizer.get_opt(net=self.net)
+
+    #     for epoch in range(self.step):
+    #         for i, data_dict in enumerate(self.data()):
+    #             logit = self.net(**data_dict)
+    #             # build the loss for training
+    #             # logit = self.loss_info.get_loss(logit)
+    #             loss = self.loss.get_loss(logit)
+    #             loss.backward()
+    #             opt.step()
+    #             opt.clear_grad()
+    #     return logit
+
+    # def dy2st_train(self):
+    #     """dy2st train"""
+
+    #     # if not self.net.parameters():
+    #     #     return "pass"
+
+    #     data = self._net_input()
+    #     net = self._net_instant()
+    #     optimizer = self._net_optimizer()
+    #     loss = self._net_loss()
+
+    #     net.train()
+    #     st_net = torch.jit.to_static(net, full_graph=True)
+
+    #     # build the optimizer for training
+    #     if st_net.parameters():
+    #         opt = optimizer.get_opt(net=st_net)
+
+    #     for epoch in range(self.step):
+    #         logit = st_net(*data)
+    #         # build the loss for training
+    #         dy_loss = loss.get_loss(logit)
+    #         dy_loss.backward()
+    #         if st_net.parameters():
+    #             opt.step()
+    #             opt.clear_grad()
+
+    #         Logger("dy2st_train").get_log().info(f"completed {epoch} training steps")
+    #     data_grad = self._get_data_grad(data)
+    #     # return {"logit": logit, "data_grad": data_grad}
+    #     if self.return_net_instance == "True":
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": st_net}
+    #     else:
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
+
+    # def dy2st_train_inputspec(self):
+    #     """dy2st train with dy inputspec"""
+    #     data, input_spec = self._net_input_and_spec()
+    #     Logger("dy2st_train_inputspec").get_log().info(f"dynamic InputSpec under test: {input_spec}")
+    #     net = self._net_instant()
+    #     optimizer = self._net_optimizer()
+    #     loss = self._net_loss()
+
+    #     net.train()
+    #     st_net = torch.jit.to_static(net, full_graph=True, input_spec=input_spec)
+
+    #     # build the optimizer for training
+    #     if st_net.parameters():
+    #         opt = optimizer.get_opt(net=st_net)
+
+    #     for epoch in range(self.step):
+    #         logit = st_net(*data)
+    #         # build the loss for training
+    #         dy_loss = loss.get_loss(logit)
+    #         dy_loss.backward()
+    #         if st_net.parameters():
+    #             opt.step()
+    #             opt.clear_grad()
+
+    #         Logger("dy2st_train_inputspec").get_log().info(f"completed {epoch} training steps")
+    #     data_grad = self._get_data_grad(data)
+    #     # return {"logit": logit, "data_grad": data_grad}
+    #     if self.return_net_instance == "True":
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": st_net}
+    #     else:
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
+
+    # def dy2st_train_static_inputspec(self):
+    #     """dy2st train with st inputspec"""
+    #     data, input_spec = self._net_input_and_static_spec()
+    #     Logger("dy2st_train_static_inputspec").get_log().info(f"static InputSpec under test: {input_spec}")
+    #     net = self._net_instant()
+    #     optimizer = self._net_optimizer()
+    #     loss = self._net_loss()
+
+    #     net.train()
+    #     st_net = torch.jit.to_static(net, full_graph=True, input_spec=input_spec)
+
+    #     # build the optimizer for training
+    #     if st_net.parameters():
+    #         opt = optimizer.get_opt(net=st_net)
+
+    #     for epoch in range(self.step):
+    #         logit = st_net(*data)
+    #         # build the loss for training
+    #         dy_loss = loss.get_loss(logit)
+    #         dy_loss.backward()
+    #         if st_net.parameters():
+    #             opt.step()
+    #             opt.clear_grad()
+
+    #         Logger("dy2st_train_static_inputspec").get_log().info(f"completed {epoch} training steps")
+    #     data_grad = self._get_data_grad(data)
+    #     # return {"logit": logit, "data_grad": data_grad}
+    #     if self.return_net_instance == "True":
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": st_net}
+    #     else:
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
+
+    # def dy2st_train_cinn(self):
+    #     """dy2st cinn train"""
+
+    #     if os.environ.get("PLT_enable_auto_recompute", "0") == "1":
+    #         os.environ["FLAGS_enable_auto_recompute"] = "1"
+    #         Logger("dy2st_train_cinn").get_log().info("environment variable FLAGS_enable_auto_recompute has been set")
+    #     data = self._net_input()
+    #     net = self._net_instant()
+    #     optimizer = self._net_optimizer()
+    #     loss = self._net_loss()
+
+    #     net.train()
+    #     build_strategy = torch.static.BuildStrategy()
+    #     build_strategy.build_cinn_pass = True
+    #     cinn_net = torch.jit.to_static(net, build_strategy=build_strategy, full_graph=True)
+
+    #     # build the optimizer for training
+    #     if cinn_net.parameters():
+    #         opt = optimizer.get_opt(net=cinn_net)
+
+    #     for epoch in range(self.step):
+    #         logit = cinn_net(*data)
+    #         # build the loss for training
+    #         dy_loss = loss.get_loss(logit)
+    #         dy_loss.backward()
+    #         if cinn_net.parameters():
+    #             opt.step()
+    #             opt.clear_grad()
+
+    #         Logger("dy2st_train_cinn").get_log().info(f"completed {epoch} training steps")
+    #     data_grad = self._get_data_grad(data)
+
+    #     self._unset_flags(engine_str="dy2st_train_cinn")
+    #     if self.return_net_instance == "True":
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": cinn_net}
+    #     else:
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
+
+    # def dy2st_train_cinn_inputspec(self):
+    #     """dy2st cinn train with dy inputspec"""
+
+    #     if os.environ.get("PLT_enable_auto_recompute", "0") == "1":
+    #         os.environ["FLAGS_enable_auto_recompute"] = "1"
+    #         Logger("dy2st_train_cinn_inputspec").get_log().info("environment variable FLAGS_enable_auto_recompute has been set")
+    #     data, input_spec = self._net_input_and_spec()
+    #     Logger("dy2st_train_cinn_inputspec").get_log().info(f"dynamic InputSpec under test: {input_spec}")
+    #     net = self._net_instant()
+    #     optimizer = self._net_optimizer()
+    #     loss = self._net_loss()
+
+    #     net.train()
+    #     build_strategy = torch.static.BuildStrategy()
+    #     build_strategy.build_cinn_pass = True
+    #     cinn_net = torch.jit.to_static(net, build_strategy=build_strategy, full_graph=True, input_spec=input_spec)
+
+    #     # build the optimizer for training
+    #     if cinn_net.parameters():
+    #         opt = optimizer.get_opt(net=cinn_net)
+
+    #     for epoch in range(self.step):
+    #         logit = cinn_net(*data)
+    #         # build the loss for training
+    #         dy_loss = loss.get_loss(logit)
+    #         dy_loss.backward()
+    #         if cinn_net.parameters():
+    #             opt.step()
+    #             opt.clear_grad()
+
+    #         Logger("dy2st_train_cinn_inputspec").get_log().info(f"completed {epoch} training steps")
+    #     data_grad = self._get_data_grad(data)
+
+    #     self._unset_flags(engine_str="dy2st_train_cinn_inputspec")
+    #     if self.return_net_instance == "True":
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": cinn_net}
+    #     else:
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
+
+    # def dy2st_train_cinn_static_inputspec(self):
+    #     """dy2st cinn train with st inputspec"""
+
+    #     if os.environ.get("PLT_enable_auto_recompute", "0") == "1":
+    #         os.environ["FLAGS_enable_auto_recompute"] = "1"
+    #         Logger("dy2st_train_cinn_static_inputspec").get_log().info("environment variable FLAGS_enable_auto_recompute has been set")
+    #     data, input_spec = self._net_input_and_static_spec()
+    #     Logger("dy2st_train_cinn_static_inputspec").get_log().info(f"static InputSpec under test: {input_spec}")
+    #     net = self._net_instant()
+    #     optimizer = self._net_optimizer()
+    #     loss = self._net_loss()
+
+    #     net.train()
+    #     build_strategy = torch.static.BuildStrategy()
+    #     build_strategy.build_cinn_pass = True
+    #     cinn_net = torch.jit.to_static(net, build_strategy=build_strategy, full_graph=True, input_spec=input_spec)
+
+    #     # build the optimizer for training
+    #     if cinn_net.parameters():
+    #         opt = optimizer.get_opt(net=cinn_net)
+
+    #     for epoch in range(self.step):
+    #         logit = cinn_net(*data)
+    #         # build the loss for training
+    #         dy_loss = loss.get_loss(logit)
+    #         dy_loss.backward()
+    #         if cinn_net.parameters():
+    #             opt.step()
+    #             opt.clear_grad()
+
+    #         Logger("dy2st_train_cinn_static_inputspec").get_log().info(f"completed {epoch} training steps")
+    #     data_grad = self._get_data_grad(data)
+
+    #     self._unset_flags(engine_str="dy2st_train_cinn_static_inputspec")
+    #     if self.return_net_instance == "True":
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": cinn_net}
+    #     else:
+    #         return {"res": {"logit": logit, "data_grad": data_grad}, "net": None}
diff --git a/framework/e2e/PaddleLT_new/generator/builder_data.py b/framework/e2e/PaddleLT_new/generator/builder_data.py
index d00c1aed3e..0fcef89238 100644
--- a/framework/e2e/PaddleLT_new/generator/builder_data.py
+++ b/framework/e2e/PaddleLT_new/generator/builder_data.py
@@ -54,8 +54,10 @@ def get_single_data(self, framework="paddle"):
                     tmp.append(paddle.to_tensor(j, stop_gradient=False))
                 elif framework == "torch":
                     if j.dtype == np.int64 or j.dtype == np.int32:
+                        # tmp.append(torch.tensor(j, requires_grad=False, device=torch.device('cuda:0')))
                         tmp.append(torch.tensor(j, requires_grad=False))
                     else:
+                        # tmp.append(torch.tensor(j, requires_grad=True, device=torch.device('cuda:0')))
                         tmp.append(torch.tensor(j, requires_grad=True))
                 data.append(tmp)
             elif isinstance(i, np.ndarray):
@@ -66,8 +68,10 @@ def get_single_data(self, framework="paddle"):
                 data.append(paddle.to_tensor(i, stop_gradient=False))
             elif framework == "torch":
                 if i.dtype == np.int64 or i.dtype == np.int32:
+                    # data.append(torch.tensor(i, requires_grad=False, device=torch.device('cuda:0')))
                     data.append(torch.tensor(i, requires_grad=False))
                 else:
+                    # data.append(torch.tensor(i, requires_grad=True, device=torch.device('cuda:0')))
                     data.append(torch.tensor(i, requires_grad=True))
             elif isinstance(i, float):
                 data.append(paddle.to_tensor(i, stop_gradient=False))
diff --git a/framework/e2e/PaddleLT_new/layerApicase/math_extreme_size/abs_giant_size_func.py b/framework/e2e/PaddleLT_new/layerApicase/math_extreme_size/abs_giant_size_func.py
index c4585dca0b..fc19515aaf 100644
--- a/framework/e2e/PaddleLT_new/layerApicase/math_extreme_size/abs_giant_size_func.py
+++ b/framework/e2e/PaddleLT_new/layerApicase/math_extreme_size/abs_giant_size_func.py
@@ -33,7 +33,7 @@ def create_tensor_inputs():
     """
     paddle tensor
     """
-    inputs = (paddle.to_tensor(-1 + (1 - -1) * np.random.random([1024, 256, 128, 100, 2]).astype('float32'), dtype='float32', stop_gradient=False), )
+    inputs = (paddle.to_tensor(-1 + (1 - -1) * np.random.random([1024, 256, 64, 100, 2]).astype('float32'), dtype='float32', stop_gradient=False), )
     return inputs
 
 
@@ -41,6 +41,6 @@ def create_numpy_inputs():
     """
     numpy array
     """
-    inputs = (-1 + (1 - -1) * np.random.random([1024, 256, 128, 100, 2]).astype('float32'), )
+    inputs = (-1 + (1 - -1) * np.random.random([1024, 256, 64, 100, 2]).astype('float32'), )
     return inputs
 
diff --git a/framework/e2e/PaddleLT_new/layertest.py b/framework/e2e/PaddleLT_new/layertest.py
index 0d5d8eec4f..73509604f3 100644
--- a/framework/e2e/PaddleLT_new/layertest.py
+++ b/framework/e2e/PaddleLT_new/layertest.py
@@ -110,7 +110,7 @@ def _case_run(self):
                 res = self._single_run(
                     testing=testing,
                     layerfile=self.layerfile,
-                    device_place_id=self.device_place_id,
+                    device_place_id=self.testings.get(testing).get("device_place_id", self.device_place_id),
                     upstream_net=net,
                     framework=self.testings.get(testing).get("framework", "paddle"),
                     orderdict_usage=self.testings.get(testing).get("orderdict_usage", "None"),
@@ -257,8 +257,10 @@ def _perf_unit_case_run(self, plt_exc):
 
 
 if __name__ == "__main__":
-    # layerfile = "layerApicase/nn_sublayer/Conv2D_0_class.py"
+    # layerfile = "layerApicase/math_extreme_size/abs_giant_size_func.py"
     # testing = "yaml/dy_eval^torch_dy_eval.yml"
+    # # testing = "yaml/dy_eval.yml"
+    # # testing = "yaml/dy_train.yml"
     # single_test = LayerTest(title=layerfile, layerfile=layerfile, testing=testing)
     # single_test._case_run()
     # exit(0)
diff --git a/framework/e2e/PaddleLT_new/strategy/compare.py b/framework/e2e/PaddleLT_new/strategy/compare.py
index a7d12b6ba7..135883f245 100644
--- a/framework/e2e/PaddleLT_new/strategy/compare.py
+++ b/framework/e2e/PaddleLT_new/strategy/compare.py
@@ -199,6 +199,8 @@ def torch_compare(result, expect, res_name, exp_name, logger, delta=1e-10, rtol=
     if isinstance(result, eval("paddle.Tensor")):
         result = result.numpy()
     if torch.is_tensor(expect):
+        # expect = expect.numpy()
+        expect = expect.cpu()
         expect = expect.detach().numpy()
     # res = np.allclose(result, expect, atol=delta, rtol=rtol, equal_nan=True)
     # # print the mismatching data on failure
diff --git a/framework/e2e/PaddleLT_new/torch_case/layerApicase/math_extreme_size/abs_giant_size_func.py b/framework/e2e/PaddleLT_new/torch_case/layerApicase/math_extreme_size/abs_giant_size_func.py
index 7979082a97..83239a47c3 100644
--- a/framework/e2e/PaddleLT_new/torch_case/layerApicase/math_extreme_size/abs_giant_size_func.py
+++ b/framework/e2e/PaddleLT_new/torch_case/layerApicase/math_extreme_size/abs_giant_size_func.py
@@ -26,7 +26,7 @@ def create_tensor_inputs():
     """
     PyTorch tensor
     """
-    inputs = (torch.tensor((-1 + 2 * np.random.random([1024, 256, 128, 100, 2])).astype(np.float32), dtype=torch.float32, requires_grad=True), )
+    inputs = (torch.tensor((-1 + 2 * np.random.random([1024, 256, 64, 100, 2])).astype(np.float32), dtype=torch.float32, requires_grad=True), )
     return inputs
 
 
@@ -35,5 +35,5 @@ def create_numpy_inputs():
     numpy array
     """
     # generate a random numpy array of shape [1024, 256, 64, 100, 2] with values in [-1, 1)
-    inputs = ((-1 + 2 * np.random.random([1024, 256, 128, 100, 2])).astype('float32'),)
+    inputs = ((-1 + 2 * np.random.random([1024, 256, 64, 100, 2])).astype('float32'),)
     return inputs
diff --git a/framework/e2e/PaddleLT_new/yaml/dy_eval^torch_dy_eval.yml b/framework/e2e/PaddleLT_new/yaml/dy_eval^torch_dy_eval.yml
index cf366c8292..81efb72ede 100644
--- a/framework/e2e/PaddleLT_new/yaml/dy_eval^torch_dy_eval.yml
+++ b/framework/e2e/PaddleLT_new/yaml/dy_eval^torch_dy_eval.yml
@@ -2,11 +2,13 @@ testings:
   torch_dy_eval:
     framework: "torch"
     orderdict_usage: "save"
+    device_place_id: "1"
     model_dtype: "float32"
 
   dy_eval:
     framework: "paddle"
     orderdict_usage: "load"
+    device_place_id: "0"
     model_dtype: "float32"
 
 compare:
diff --git a/framework/e2e/PaddleLT_new/yaml/dy_train^torch_dy_train.yml b/framework/e2e/PaddleLT_new/yaml/dy_train^torch_dy_train.yml
new file mode 100644
index 0000000000..e76c02dd2c
--- /dev/null
+++ b/framework/e2e/PaddleLT_new/yaml/dy_train^torch_dy_train.yml
@@ -0,0 +1,36 @@
+testings:
+  torch_dy_train:
+    framework: "torch"
+    orderdict_usage: "save"
+    device_place_id: "1"
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+    step: 1
+
+  dy_train:
+    framework: "paddle"
+    orderdict_usage: "load"
+    device_place_id: "0"
+    model_dtype: "float32"
+    Loss:
+      loss_name: "diy.loss.diy_loss.mean_loss"
+    optimizer:
+      optimizer_name: "diy.optimizer.diy_opt.naive_opt"
+      params:
+        opt_api: "paddle.optimizer.SGD"
+        learning_rate: 0.00001
+    step: 1
+
+compare:
+  -
+    baseline: 'torch_dy_train'
+    latest: 'dy_train'
+    precision:
+      delta: 0.00001
+      rtol: 0.000001