@@ -8,7 +8,6 @@
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # pylint: disable=redefined-builtin
 from .elemwise import *
-from .loss import *
 from .math import *
 from .nn import *
 from .quantized import conv_bias_activation
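In the hunk above, the package root stops star-importing `loss` directly; every example and test below switches to the `F.nn.*` spelling accordingly. A minimal before/after sketch of the call style, assuming the conventional `import megengine.functional as F` alias:

    import numpy as np
    import megengine as mge
    import megengine.functional as F

    pred = mge.tensor(np.array([3, 3, 3, 3], dtype=np.float32))
    label = mge.tensor(np.array([2, 8, 6, 1], dtype=np.float32))

    # before this change: loss = F.l1_loss(pred, label)
    loss = F.nn.l1_loss(pred, label)  # losses are now addressed via F.nn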
@@ -55,7 +55,7 @@ def l1_loss(pred: Tensor, label: Tensor) -> Tensor:
         ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
         tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
-        loss = F.l1_loss(ipt, tgt)
+        loss = F.nn.l1_loss(ipt, tgt)
         print(loss.numpy())

     Outputs:
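For reference, `l1_loss` is the mean absolute error, so this docstring example evaluates to (1 + 5 + 3 + 2) / 4 = 2.75; a plain numpy check (assuming the default mean reduction):

    import numpy as np

    ipt = np.array([3, 3, 3, 3], dtype=np.float32)
    tgt = np.array([2, 8, 6, 1], dtype=np.float32)
    print(np.abs(ipt - tgt).mean())  # 2.75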
@@ -106,7 +106,7 @@ def square_loss(pred: Tensor, label: Tensor) -> Tensor:
         ipt = mge.tensor(np.array([3, 3, 3, 3]).astype(np.float32))
         tgt = mge.tensor(np.array([2, 8, 6, 1]).astype(np.float32))
-        loss = F.square_loss(ipt, tgt)
+        loss = F.nn.square_loss(ipt, tgt)
         print(loss.numpy())

     Outputs:
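Likewise `square_loss` is the mean squared error, so the same inputs give (1 + 25 + 9 + 4) / 4 = 9.75:

    import numpy as np

    ipt = np.array([3, 3, 3, 3], dtype=np.float32)
    tgt = np.array([2, 8, 6, 1], dtype=np.float32)
    print(((ipt - tgt) ** 2).mean())  # 9.75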
@@ -159,7 +159,7 @@ def cross_entropy(
         label_shape = (1, )
         pred = tensor(np.array([0, 0], dtype=np.float32).reshape(data_shape))
         label = tensor(np.ones(label_shape, dtype=np.int32))
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         print(loss.numpy())

     Outputs:
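With both logits zero, the softmax is uniform over the two classes, so the loss is -log(1/2) ≈ 0.6931 whatever the label; in with-logits form, cross entropy is logsumexp(pred) - pred[label]:

    import numpy as np

    pred = np.array([0.0, 0.0])
    label = 1
    print(np.log(np.exp(pred).sum()) - pred[label])  # 0.6931... == log(2)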
@@ -226,7 +226,7 @@ def binary_cross_entropy(
         pred = tensor(np.array([0, 0], dtype=np.float32).reshape(1, 2))
         label = tensor(np.ones((1, 2), dtype=np.float32))
-        loss = F.binary_cross_entropy(pred, label)
+        loss = F.nn.binary_cross_entropy(pred, label)
         print(loss.numpy())

     Outputs:
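The binary variant also defaults to logit inputs: sigmoid(0) = 0.5, so the loss against an all-ones label is -log(0.5) ≈ 0.6931, matching the multiclass example above:

    import numpy as np

    pred = np.zeros((1, 2))
    label = np.ones((1, 2))
    p = 1 / (1 + np.exp(-pred))  # sigmoid
    print(-(label * np.log(p) + (1 - label) * np.log(1 - p)).mean())  # 0.6931...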
@@ -264,7 +264,7 @@ def hinge_loss(pred: Tensor, label: Tensor, norm: str = "L1") -> Tensor:
         pred = tensor([[0.5, -0.5, 0.1], [-0.6, 0.7, 0.8]], dtype="float32")
         label = tensor([[1, -1, -1], [-1, 1, 1]], dtype="float32")
-        loss = F.hinge_loss(pred, label)
+        loss = F.nn.hinge_loss(pred, label)
         print(loss.numpy())

     Outputs:
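Worked by hand: 1 - pred * label is [[0.5, 0.5, 1.1], [0.4, 0.3, 0.2]], already positive everywhere, so the row sums are 2.1 and 0.9 and their mean is 1.5:

    import numpy as np

    pred = np.array([[0.5, -0.5, 0.1], [-0.6, 0.7, 0.8]], dtype=np.float32)
    label = np.array([[1, -1, -1], [-1, 1, 1]], dtype=np.float32)
    print(np.maximum(0, 1 - pred * label).sum(axis=1).mean())  # 1.5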
@@ -1522,5 +1522,4 @@ def nms(boxes: Tensor, scores: Tensor, iou_thresh: float) -> Tensor:
-from .loss import * # isort:skip
 from .quantized import conv_bias_activation # isort:skip
+from .loss import *
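Here `from .loss import *` moves to the very end of the module, after the `quantized` import, presumably so that `loss` is only star-imported once every primitive it depends on is defined; the `# isort:skip` markers keep isort from hoisting these lines back to the top. A toy sketch of the pattern (hypothetical `pkg` package, not MegEngine code):

    # pkg/nn.py
    def logsumexp(x):
        # primitive that pkg/loss.py builds on
        ...

    # kept at the bottom deliberately: by the time this runs, pkg.nn is fully
    # populated, so pkg/loss.py can `from .nn import logsumexp` without cycles
    from .loss import *  # isort:skip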
@@ -80,7 +80,7 @@ def test_training_converge():
     def train(data, label):
         with gm:
             pred = net(data)
-            loss = F.cross_entropy(pred, label)
+            loss = F.nn.cross_entropy(pred, label)
             gm.backward(loss)
         return loss
@@ -92,7 +92,7 @@ class MnistNet(Module):
 def train(data, label, net, opt, gm):
     with gm:
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         gm.backward(loss)
     return loss
@@ -98,7 +98,7 @@ def train(data, label, net, opt, gm):
     opt.clear_grad()
     with gm:
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         gm.backward(loss)
     opt.step()
     return loss
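For context, these `train` helpers are driven by a `GradManager` plus an optimizer created elsewhere in the test; a minimal setup sketch under assumed names (`net` is any `Module`), using the public `megengine.autodiff` and `megengine.optimizer` APIs:

    import megengine.autodiff as ad
    import megengine.optimizer as optim

    gm = ad.GradManager().attach(net.parameters())  # record grads for net
    opt = optim.SGD(net.parameters(), lr=0.01)

    loss = train(data, label, net, opt, gm)         # one full training step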
@@ -72,7 +72,7 @@ def test_xornet_trace_dump():
         with gm:
             net.train()
             pred = net(data)
-            loss = F.cross_entropy(pred, label)
+            loss = F.nn.cross_entropy(pred, label)
             gm.backward(loss)
         return pred, loss
@@ -80,7 +80,7 @@ def test_xornet_trace_dump():
     def val_fun(data, label):
         net.eval()
         pred = net(data)
-        loss = F.cross_entropy(pred, label)
+        loss = F.nn.cross_entropy(pred, label)
         return pred, loss

     @trace(symbolic=True, capture_as_const=True)
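`test_xornet_trace_dump` wraps both helpers in `trace(symbolic=True, capture_as_const=True)`, which records the graph symbolically and bakes the parameters in as constants so it can be serialized; a hedged sketch of the usual dump flow (the file name is made up):

    from megengine.jit import trace

    @trace(symbolic=True, capture_as_const=True)
    def pred_fun(data):
        net.eval()
        return net(data)

    pred_fun(data)               # run once so the trace is captured
    pred_fun.dump("xornet.mge")  # then serialize the captured graph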
@@ -317,14 +317,16 @@ def test_binary_cross_entropy():
         {"input": [data1, label1], "output": expect1,},
         {"input": [data2, label2], "output": expect2,},
     ]
-    opr_test(cases, F.binary_cross_entropy, compare_fn=compare_fn)
+    opr_test(cases, F.nn.binary_cross_entropy, compare_fn=compare_fn)

     cases = [
         {"input": [sigmoid(data1), label1], "output": expect1,},
         {"input": [sigmoid(data2), label2], "output": expect2,},
     ]
     opr_test(
-        cases, partial(F.binary_cross_entropy, with_logits=False), compare_fn=compare_fn
+        cases,
+        partial(F.nn.binary_cross_entropy, with_logits=False),
+        compare_fn=compare_fn,
     )
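The two `opr_test` calls cover both input conventions: `binary_cross_entropy` takes logits by default, while `with_logits=False` expects probabilities, which is why the second case pre-applies `sigmoid`. A numpy reference for both modes (a sketch, assuming mean reduction):

    import numpy as np

    def bce_ref(pred, label, with_logits=True):
        p = 1 / (1 + np.exp(-pred)) if with_logits else pred
        return -(label * np.log(p) + (1 - label) * np.log(1 - p)).mean()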
@@ -338,7 +340,7 @@ def test_hinge_loss():
         expect = np.clip(0, np.inf, 1 - data * label).sum(axis=1).mean()
         cases.append({"input": [data, label], "output": expect})
-    opr_test(cases, F.hinge_loss)
+    opr_test(cases, F.nn.hinge_loss)

     # cases with L2 norm
     cases = []
@@ -349,7 +351,7 @@ def test_hinge_loss():
         cases.append({"input": [data, label], "output": expect})

     def hinge_loss_with_l2_norm(pred, label):
-        return F.hinge_loss(pred, label, "L2")
+        return F.nn.hinge_loss(pred, label, "L2")

     opr_test(cases, hinge_loss_with_l2_norm)
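A side note on the reference expression above: `np.clip(0, np.inf, 1 - data * label)` passes the margin as `a_max`, so it reduces to `1 - data * label` elementwise; it matches the hinge loss here only because every margin in these cases happens to be positive. A more explicit reference covering both norms:

    import numpy as np

    def hinge_ref(pred, label, norm="L1"):
        margin = np.maximum(0, 1 - pred * label)  # classic hinge term
        if norm == "L2":
            margin = margin ** 2                  # squared-hinge variant
        return margin.sum(axis=1).mean()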
@@ -15,14 +15,14 @@ from megengine import tensor
 def test_cross_entropy_with_logits():
     data = tensor([1, 100]).astype(np.float32).reshape((1, 2))
     label = tensor([1]).astype(np.int32)
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 0.0)

     label = tensor([0]).astype(np.int32)
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 100 - 1)

     label = np.array([1])
-    loss = F.cross_entropy(data, label)
+    loss = F.nn.cross_entropy(data, label)
     np.testing.assert_allclose(loss.numpy(), 0.0)
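The expected values follow from cross_entropy(x, label) = logsumexp(x) - x[label]: with logits [1, 100] the logsumexp is ≈ 100, giving ≈ 0 for label 1 and ≈ 99 for label 0:

    import numpy as np

    x = np.array([1.0, 100.0])
    lse = x.max() + np.log(np.exp(x - x.max()).sum())  # stable logsumexp
    print(lse - x[1])  # ~0.0,  the label = 1 case
    print(lse - x[0])  # ~99.0, the label = 0 case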
@@ -41,5 +41,5 @@ def test_cross_entropy():
         x[i, y[i]] += np.random.rand() * 2
     x = softmax(x)
     l_ref = ref(x, y)
-    l = F.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
+    l = F.nn.cross_entropy(tensor(x, "float32"), tensor(y, "int32"), with_logits=False)
     np.testing.assert_allclose(l.numpy(), l_ref)
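Here the logits are pushed through `softmax` first, so `with_logits=False` makes `cross_entropy` consume the probabilities directly; `ref` (defined earlier in the test, body not shown) presumably computes the plain negative log-likelihood, along these lines:

    import numpy as np

    def ref(x, y):
        # mean of -log(probability assigned to the true class)
        return np.mean([-np.log(x[i, y[i]]) for i in range(len(y))])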