
Fix some errors

Branch: pull/4/MERGE
bushuhui committed 4 years ago
commit 48e49de896
10 changed files with 770 additions and 253 deletions

1. 3_kmeans/2-kmeans-color-vq.ipynb (+10, -10)
2. 4_logistic_regression/1-Least_squares.ipynb (+540, -9)
3. 4_logistic_regression/2-Logistic_regression.ipynb (+16, -15)
4. 5_nn/3-softmax_ce.ipynb (+1, -1)
5. 6_pytorch/0_basic/1-Tensor-and-Variable.ipynb (+3, -3)
6. 6_pytorch/0_basic/2-autograd.ipynb (+28, -38)
7. 6_pytorch/1_NN/1-linear-regression-gradient-descend.ipynb (+92, -97)
8. 6_pytorch/1_NN/2-logistic-regression.ipynb (+4, -3)
9. 6_pytorch/backup/PyTorch_quick_intro.ipynb (+76, -76)
10. README.md (+0, -1)

3_kmeans/2-kmeans-color-vq.ipynb (+10, -10): file diff suppressed because it is too large


4_logistic_regression/1-Least_squares.ipynb (+540, -9): file diff suppressed because it is too large


4_logistic_regression/2-Logistic_regression.ipynb (+16, -15): file diff suppressed because it is too large


5_nn/3-softmax_ce.ipynb (+1, -1)

@@ -168,7 +168,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.6.9"
}
},
"nbformat": 4,


6_pytorch/0_basic/1-Tensor-and-Variable.ipynb (+3, -3): file diff suppressed because it is too large


6_pytorch/0_basic/2-autograd.ipynb (+28, -38)

@@ -10,7 +10,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
@@ -28,7 +28,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 2,
"metadata": {},
"outputs": [
{
@@ -66,7 +66,7 @@
},
{
"cell_type": "code",
"execution_count": 4,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -417,17 +417,14 @@
},
{
"cell_type": "code",
"execution_count": 12,
"execution_count": 4,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
" 18\n",
"[torch.FloatTensor of size 1]\n",
"\n"
"tensor([18.], grad_fn=<AddBackward0>)\n"
]
}
],
@@ -439,7 +436,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
@@ -448,17 +445,14 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
" 8\n",
"[torch.FloatTensor of size 1]\n",
"\n"
"tensor([8.])\n"
]
}
],
@@ -468,10 +462,8 @@
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": true
},
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"y.backward() # 再做一次自动求导,这次不保留计算图"
@@ -479,17 +471,14 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Variable containing:\n",
" 16\n",
"[torch.FloatTensor of size 1]\n",
"\n"
"tensor([16.])\n"
]
}
],
@@ -564,7 +553,7 @@
},
{
"cell_type": "code",
"execution_count": 38,
"execution_count": 21,
"metadata": {},
"outputs": [],
"source": [
@@ -577,25 +566,27 @@
},
{
"cell_type": "code",
"execution_count": 39,
"execution_count": 18,
"metadata": {},
"outputs": [],
"source": [
"# k.backward(torch.ones_like(k)) \n",
"# print(x.grad)\n",
"#k.backward(torch.ones_like(k)) \n",
"#print(x.grad)\n",
"# 和上一个的区别在于该算法是求得导数和,并不是分布求解。"
]
},
{
"cell_type": "code",
"execution_count": 40,
"execution_count": 22,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([13., 13.], grad_fn=<CopySlices>)\n"
"tensor([13., 13.], grad_fn=<CopySlices>)\n",
"tensor([4., 3.])\n",
"tensor([2., 6.])\n"
]
}
],
@@ -604,16 +595,18 @@
"k.backward(torch.FloatTensor([1, 0]), retain_graph=True)\n",
"print(k)\n",
"j[0] = x.grad.data\n",
"print(x.grad.data)\n",
"\n",
"x.grad.data.zero_() # 归零之前求得的梯度\n",
"\n",
"k.backward(torch.FloatTensor([0, 1]))\n",
"j[1] = x.grad.data"
"j[1] = x.grad.data\n",
"print(x.grad.data)\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 12,
"metadata": {},
"outputs": [
{
@@ -630,18 +623,15 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 13,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
" 4 3\n",
" 2 6\n",
"[torch.FloatTensor of size 2x2]\n",
"\n"
"tensor([[4., 3.],\n",
" [2., 6.]])\n"
]
}
],
@@ -673,7 +663,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.6.9"
}
},
"nbformat": 4,


6_pytorch/1_NN/1-linear-regression-gradient-descend.ipynb (+92, -97): file diff suppressed because it is too large


6_pytorch/1_NN/2-logistic-regression.ipynb (+4, -3)

@@ -387,7 +387,7 @@
"可以看到分类效果基本是混乱的,我们来计算一下 loss,公式如下\n",
"\n",
"$$\n",
"loss = -(y * log(\\hat{y}) + (1 - y) * log(1 - \\hat{y}))\n",
"loss = -\\{ y * log(\\hat{y}) + (1 - y) * log(1 - \\hat{y}) \\}\n",
"$$"
]
},
@@ -399,7 +399,8 @@
"source": [
"# 计算loss, 使用clamp的目的是防止数据过小而对结果产生较大影响。\n",
"def binary_loss(y_pred, y):\n",
" logits = (y * y_pred.clamp(1e-12).log() + (1 - y) * (1 - y_pred).clamp(1e-12).log()).mean()\n",
" logits = (y * y_pred.clamp(1e-12).log() + \\\n",
" (1 - y) * (1 - y_pred).clamp(1e-12).log()).mean()\n",
" return -logits"
]
},
@@ -781,7 +782,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
"version": "3.6.9"
}
},
"nbformat": 4,


6_pytorch/PyTorch_quick_intro.ipynb → 6_pytorch/backup/PyTorch_quick_intro.ipynb (+76, -76)

@@ -41,11 +41,11 @@
{
"data": {
"text/plain": [
"tensor([[ 1.2516e-36, 0.0000e+00, 2.3822e-44],\n",
" [ 0.0000e+00, nan, 4.5743e-41],\n",
" [ 1.3733e-14, 1.8888e+31, 4.9656e+28],\n",
" [ 4.5439e+30, -4.2010e+25, 4.5743e-41],\n",
" [-4.2210e+25, 4.5743e-41, -4.2210e+25]])"
"tensor([[0., 0., 0.],\n",
" [0., 0., 0.],\n",
" [0., 0., 0.],\n",
" [0., 0., 0.],\n",
" [0., 0., 0.]])"
]
},
"execution_count": 2,
@@ -67,11 +67,11 @@
{
"data": {
"text/plain": [
"tensor([[0.1878, 0.1306, 0.1593],\n",
" [0.2964, 0.3927, 0.7782],\n",
" [0.8448, 0.4487, 0.7916],\n",
" [0.0550, 0.7300, 0.2901],\n",
" [0.8453, 0.8734, 0.2627]])"
"tensor([[0.3807, 0.4897, 0.0356],\n",
" [0.6701, 0.0606, 0.1818],\n",
" [0.8798, 0.7115, 0.8265],\n",
" [0.4094, 0.2264, 0.2041],\n",
" [0.9088, 0.9256, 0.3438]])"
]
},
"execution_count": 3,
@@ -122,20 +122,20 @@
},
{
"cell_type": "code",
"execution_count": 7,
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[0.9549, 1.0717, 0.4005],\n",
" [0.8394, 0.7862, 0.8726],\n",
" [1.4099, 1.3137, 1.1250],\n",
" [0.4830, 0.8297, 0.5617],\n",
" [0.9343, 0.9557, 0.9178]])"
"tensor([[1.1361, 1.4054, 0.9468],\n",
" [1.6410, 0.5193, 0.3720],\n",
" [0.9482, 1.6716, 1.4168],\n",
" [1.3925, 0.9253, 0.2908],\n",
" [1.4907, 1.7178, 0.7246]])"
]
},
"execution_count": 7,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
@@ -154,11 +154,11 @@
{
"data": {
"text/plain": [
"tensor([[0.5021, 1.2500, 1.4749],\n",
" [0.6019, 0.9378, 1.7240],\n",
" [1.2752, 1.3837, 0.6832],\n",
" [1.2053, 1.4374, 1.5160],\n",
" [0.9404, 0.8743, 0.8164]])"
"tensor([[1.1361, 1.4054, 0.9468],\n",
" [1.6410, 0.5193, 0.3720],\n",
" [0.9482, 1.6716, 1.4168],\n",
" [1.3925, 0.9253, 0.2908],\n",
" [1.4907, 1.7178, 0.7246]])"
]
},
"execution_count": 6,
@@ -173,20 +173,20 @@
},
{
"cell_type": "code",
"execution_count": 8,
"execution_count": 7,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[0.9549, 1.0717, 0.4005],\n",
" [0.8394, 0.7862, 0.8726],\n",
" [1.4099, 1.3137, 1.1250],\n",
" [0.4830, 0.8297, 0.5617],\n",
" [0.9343, 0.9557, 0.9178]])"
"tensor([[1.1361, 1.4054, 0.9468],\n",
" [1.6410, 0.5193, 0.3720],\n",
" [0.9482, 1.6716, 1.4168],\n",
" [1.3925, 0.9253, 0.2908],\n",
" [1.4907, 1.7178, 0.7246]])"
]
},
"execution_count": 8,
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
@@ -200,7 +200,7 @@
},
{
"cell_type": "code",
"execution_count": 9,
"execution_count": 8,
"metadata": {},
"outputs": [
{
@@ -208,23 +208,23 @@
"output_type": "stream",
"text": [
"最初y\n",
"tensor([[0.7671, 0.9411, 0.2411],\n",
" [0.5430, 0.3935, 0.0944],\n",
" [0.5652, 0.8650, 0.3334],\n",
" [0.4280, 0.0997, 0.2716],\n",
" [0.0890, 0.0823, 0.6551]])\n",
"tensor([[0.7554, 0.9157, 0.9113],\n",
" [0.9709, 0.4587, 0.1902],\n",
" [0.0684, 0.9601, 0.5903],\n",
" [0.9831, 0.6989, 0.0867],\n",
" [0.5819, 0.7923, 0.3808]])\n",
"第一种加法,y的结果\n",
"tensor([[0.7671, 0.9411, 0.2411],\n",
" [0.5430, 0.3935, 0.0944],\n",
" [0.5652, 0.8650, 0.3334],\n",
" [0.4280, 0.0997, 0.2716],\n",
" [0.0890, 0.0823, 0.6551]])\n",
"tensor([[0.7554, 0.9157, 0.9113],\n",
" [0.9709, 0.4587, 0.1902],\n",
" [0.0684, 0.9601, 0.5903],\n",
" [0.9831, 0.6989, 0.0867],\n",
" [0.5819, 0.7923, 0.3808]])\n",
"第二种加法,y的结果\n",
"tensor([[0.9549, 1.0717, 0.4005],\n",
" [0.8394, 0.7862, 0.8726],\n",
" [1.4099, 1.3137, 1.1250],\n",
" [0.4830, 0.8297, 0.5617],\n",
" [0.9343, 0.9557, 0.9178]])\n"
"tensor([[1.1361, 1.4054, 0.9468],\n",
" [1.6410, 0.5193, 0.3720],\n",
" [0.9482, 1.6716, 1.4168],\n",
" [1.3925, 0.9253, 0.2908],\n",
" [1.4907, 1.7178, 0.7246]])\n"
]
}
],
@@ -250,16 +250,16 @@
},
{
"cell_type": "code",
"execution_count": 10,
"execution_count": 9,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([0.1306, 0.3927, 0.4487, 0.7300, 0.8734])"
"tensor([0.4897, 0.0606, 0.7115, 0.2264, 0.9256])"
]
},
"execution_count": 10,
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
@@ -301,7 +301,7 @@
},
{
"cell_type": "code",
"execution_count": 13,
"execution_count": 12,
"metadata": {},
"outputs": [
{
@@ -310,7 +310,7 @@
"array([1., 1., 1., 1., 1.], dtype=float32)"
]
},
"execution_count": 13,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
@@ -322,7 +322,7 @@
},
{
"cell_type": "code",
"execution_count": 14,
"execution_count": 13,
"metadata": {},
"outputs": [
{
@@ -351,7 +351,7 @@
},
{
"cell_type": "code",
"execution_count": 15,
"execution_count": 14,
"metadata": {},
"outputs": [
{
@@ -378,18 +378,18 @@
},
{
"cell_type": "code",
"execution_count": 16,
"execution_count": 15,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[1.1427, 1.2022, 0.5598],\n",
" [1.1357, 1.1790, 1.6507],\n",
" [2.2547, 1.7623, 1.9165],\n",
" [0.5381, 1.5597, 0.8518],\n",
" [1.7796, 1.8291, 1.1805]], device='cuda:0')\n"
"tensor([[1.5168, 1.8951, 0.9824],\n",
" [2.3111, 0.5800, 0.5538],\n",
" [1.8280, 2.3831, 2.2433],\n",
" [1.8020, 1.1518, 0.4949],\n",
" [2.3995, 2.6434, 1.0684]], device='cuda:0')\n"
]
}
],
@@ -431,7 +431,7 @@
},
{
"cell_type": "code",
"execution_count": 17,
"execution_count": 16,
"metadata": {},
"outputs": [],
"source": [
@@ -440,7 +440,7 @@
},
{
"cell_type": "code",
"execution_count": 18,
"execution_count": 17,
"metadata": {
"scrolled": true
},
@@ -452,7 +452,7 @@
" [1., 1.]], requires_grad=True)"
]
},
"execution_count": 18,
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
}
@@ -465,7 +465,7 @@
},
{
"cell_type": "code",
"execution_count": 19,
"execution_count": 18,
"metadata": {
"scrolled": true
},
@@ -476,7 +476,7 @@
"tensor(4., grad_fn=<SumBackward0>)"
]
},
"execution_count": 19,
"execution_count": 18,
"metadata": {},
"output_type": "execute_result"
}
@@ -488,16 +488,16 @@
},
{
"cell_type": "code",
"execution_count": 20,
"execution_count": 19,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"<SumBackward0 at 0x7f8381c656d8>"
"<SumBackward0 at 0x7f0158d2f198>"
]
},
"execution_count": 20,
"execution_count": 19,
"metadata": {},
"output_type": "execute_result"
}
@@ -508,7 +508,7 @@
},
{
"cell_type": "code",
"execution_count": 21,
"execution_count": 20,
"metadata": {},
"outputs": [],
"source": [
@@ -517,7 +517,7 @@
},
{
"cell_type": "code",
"execution_count": 22,
"execution_count": 21,
"metadata": {},
"outputs": [
{
@@ -527,7 +527,7 @@
" [1., 1.]])"
]
},
"execution_count": 22,
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
@@ -547,7 +547,7 @@
},
{
"cell_type": "code",
"execution_count": 23,
"execution_count": 22,
"metadata": {},
"outputs": [
{
@@ -557,7 +557,7 @@
" [2., 2.]])"
]
},
"execution_count": 23,
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
@@ -569,7 +569,7 @@
},
{
"cell_type": "code",
"execution_count": 24,
"execution_count": 23,
"metadata": {
"scrolled": true
},
@@ -581,7 +581,7 @@
" [3., 3.]])"
]
},
"execution_count": 24,
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
@@ -644,7 +644,7 @@
},
{
"cell_type": "code",
"execution_count": 27,
"execution_count": 28,
"metadata": {},
"outputs": [
{
@@ -666,7 +666,7 @@
" [0.5403, 0.5403, 0.5403, 0.5403, 0.5403]])"
]
},
"execution_count": 27,
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
@@ -1463,7 +1463,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
"version": "3.6.9"
}
},
"nbformat": 4,

README.md (+0, -1)

@@ -35,7 +35,6 @@
- [Softmax & cross-entroy](5_nn/3-softmax_ce.ipynb)
8. [PyTorch](6_pytorch/)
- Basic
-- [short tutorial](6_pytorch/PyTorch_quick_intro.ipynb)
- [basic/Tensor-and-Variable](6_pytorch/0_basic/1-Tensor-and-Variable.ipynb)
- [basic/autograd](6_pytorch/0_basic/2-autograd.ipynb)
- [basic/dynamic-graph](6_pytorch/0_basic/3-dynamic-graph.ipynb)

