{"id":2888,"date":"2020-02-08T14:11:11","date_gmt":"2020-02-08T06:11:11","guid":{"rendered":"http:\/\/www.sniper97.cn\/?p=2888"},"modified":"2020-02-08T14:11:11","modified_gmt":"2020-02-08T06:11:11","slug":"%e3%80%90%e5%90%b4%e6%81%a9%e8%be%be%e6%b7%b1%e5%ba%a6%e5%ad%a6%e4%b9%a0%e3%80%91%e6%b7%b1%e5%ba%a6%e7%a5%9e%e7%bb%8f%e7%bd%91%e7%bb%9c","status":"publish","type":"post","link":"http:\/\/www.sniper97.cn\/index.php\/note\/deep-learning\/2888\/","title":{"rendered":"\u3010\u5434\u6069\u8fbe\u6df1\u5ea6\u5b66\u4e60\u3011\u6df1\u5ea6\u795e\u7ecf\u7f51\u7edc"},"content":{"rendered":"\n<p>\u5434\u6069\u8fbe\u6df1\u5ea6\u5b66\u4e60\u7b2c\u4e00\u8bfe\u7b2c\u56db\u5468 \u6df1\u5ea6\u5b66\u4e60\u7f51\u7edc<\/p>\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter\"><img loading=\"lazy\" decoding=\"async\" width=\"301\" height=\"169\" src=\"\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-31.png\" alt=\"\" class=\"wp-image-2892\"\/><\/figure><\/div>\n\n\n<h2 class=\"wp-block-heading\">1.\u4ec0\u4e48\u662f\u6df1\u5c42\u795e\u7ecf\u7f51\u7edc<\/h2>\n\n\n<p>\u5982\u4e0b\u56fe\uff0c\u5206\u522b\u662f\u903b\u8f91\u56de\u5f52\uff0c\u6709\u4e00\u4e2a\u9690\u85cf\u5c42\u7684\u795e\u7ecf\u7f51\u7edc\uff0c\u6709\u4e24\u4e2a\u9690\u85cf\u5c42\u7684\u795e\u7ecf\u7f51\u7edc\uff0c\u6709\u4e94\u4e2a\u9690\u85cf\u5c42\u7684\u795e\u7ecf\u7f51\u7edc\u3002<\/p>\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter\"><img loading=\"lazy\" decoding=\"async\" width=\"979\" height=\"445\" src=\"\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-28.png\" alt=\"\" class=\"wp-image-2889\" srcset=\"http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-28.png 979w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-28-300x136.png 300w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-28-768x349.png 768w\" sizes=\"(max-width: 979px) 100vw, 979px\" 
\/><\/figure><\/div>\n\n\n<p>\u6211\u4eec\u4e00\u822c\u79f0\u6709\u4e00\u4e2a\u4ee5\u4e0a\u9690\u85cf\u5c42\u7684\u7f51\u7edc\u4e3a\u6df1\u5c42\u795e\u7ecf\u7f51\u7edc\u3002<\/p>\n\n\n<p>\u6211\u4eec\u5728\u9009\u62e9\u6a21\u578b\u7684\u65f6\u5019\uff0c\u4e00\u822c\u5148\u4f7f\u7528\u903b\u8f91\u56de\u5f52\uff0c\u7136\u540e\u53cc\u5c42\u795e\u7ecf\u7f51\u7edc\u3001\u4e09\u5c42\u795e\u7ecf\u7f51\u7edc\u4ee5\u6b64\u7c7b\u63a8\uff0c\u628a\u9690\u85cf\u5c42\u5c42\u6570\u4f5c\u4e3a\u4e00\u4e2a\u53ef\u4ee5\u81ea\u7531\u9009\u62e9\u5927\u5c0f\u7684\u8d85\u53c2\u6765\u6539\u53d8\u3002 <\/p>\n\n\n<h2 class=\"wp-block-heading\">2. \u6df1\u5c42\u795e\u7ecf\u7f51\u7edc\u7684\u7b26\u53f7\u8868\u793a <\/h2>\n\n\n<p>\u548c\u666e\u901a\u7684\u795e\u7ecf\u7f51\u7edc\u8868\u793a\u57fa\u672c\u4e00\u6837\uff0c\u70b9\u4e86\u4e2a\u5c42\u6570L\u3002<\/p>\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter\"><img loading=\"lazy\" decoding=\"async\" width=\"901\" height=\"438\" src=\"\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-29.png\" alt=\"\" class=\"wp-image-2890\" srcset=\"http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-29.png 901w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-29-300x146.png 300w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-29-768x373.png 768w\" sizes=\"(max-width: 901px) 100vw, 901px\" \/><\/figure><\/div>\n\n\n<h2 class=\"wp-block-heading\">3.\u53c2\u6570VS\u8d85\u53c2<\/h2>\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter\"><img loading=\"lazy\" decoding=\"async\" width=\"821\" height=\"327\" src=\"\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-30.png\" alt=\"\" class=\"wp-image-2891\" srcset=\"http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-30.png 821w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-30-300x119.png 300w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-30-768x306.png 768w\" 
sizes=\"(max-width: 821px) 100vw, 821px\" \/><\/figure><\/div>\n\n\n<p> \u4e4b\u540e\u6211\u4eec\u8fd8\u4f1a\u63a5\u89e6\u4e00\u4e9b\u5176\u4ed6\u7684\u8d85\u53c2\uff0c\u6bd4\u5982momentum\u3001mini batch size\u3001\u4e0d\u540c\u7684\u6b63\u5219\u5316\u53c2\u6570\u7b49\u7b49\u3002 <\/p>\n\n\n<p><\/p>\n\n\n<h2 class=\"wp-block-heading\">\u6d4b\u9a8c<\/h2>\n\n\n<p><strong>1. \u5728\u5b9e\u73b0\u524d\u5411\u4f20\u64ad\u548c\u53cd\u5411\u4f20\u64ad\u4e2d\u4f7f\u7528\u7684\u201ccache\u201d\u662f\u4ec0\u4e48\uff1f <\/strong><\/p>\n\n\n<ol><li>\u7528\u4e8e\u5728\u8bad\u7ec3\u671f\u95f4\u7f13\u5b58\u6210\u672c\u51fd\u6570\u7684\u4e2d\u95f4\u503c\u3002<\/li><li>\u6211\u4eec\u7528\u5b83\u4f20\u9012\u524d\u5411\u4f20\u64ad\u4e2d\u8ba1\u7b97\u7684\u53d8\u91cf\u5230\u76f8\u5e94\u7684\u53cd\u5411\u4f20\u64ad\u6b65\u9aa4\uff0c\u5b83\u5305\u542b\u7528\u4e8e\u8ba1\u7b97\u5bfc\u6570\u7684\u53cd\u5411\u4f20\u64ad\u7684\u6709\u7528\u503c\u3002<\/li><li>\u5b83\u7528\u4e8e\u8ddf\u8e2a\u6211\u4eec\u6b63\u5728\u641c\u7d22\u7684\u8d85\u53c2\u6570\uff0c\u4ee5\u52a0\u901f\u8ba1\u7b97\u3002<\/li><li>\u6211\u4eec\u4f7f\u7528\u5b83\u5c06\u5411\u540e\u4f20\u64ad\u8ba1\u7b97\u7684\u53d8\u91cf\u4f20\u9012\u7ed9\u76f8\u5e94\u7684\u6b63\u5411\u4f20\u64ad\u6b65\u9aa4\uff0c\u5b83\u5305\u542b\u7528\u4e8e\u8ba1\u7b97\u6fc0\u6d3b\u7684\u6b63\u5411\u4f20\u64ad\u7684\u6709\u7528\u503c\u3002 <\/li><\/ol>\n\n\n<p>2\u3002<\/p>\n\n\n<p><strong>2. 
\u4ee5\u4e0b\u54ea\u4e9b\u662f\u201c\u8d85\u53c2\u6570\u201d\uff1f  <\/strong><\/p>\n\n\n<ol><li> \u9690\u85cf\u5c42\u7684\u5927\u5c0f<em>n<\/em>[<em>l<\/em>] <\/li><li>\u5b66\u4e60\u7387\u03b1 <\/li><li>\u8fed\u4ee3\u6b21\u6570 <\/li><li>\u795e\u7ecf\u7f51\u7edc\u4e2d\u7684\u5c42\u6570L <\/li><\/ol>\n\n\n<p>1\u30012\u30013\u30014\u3002<\/p>\n\n\n<p><strong> 3.\u4e0b\u5217\u54ea\u4e2a\u8bf4\u6cd5\u662f\u6b63\u786e\u7684\uff1f <\/strong><\/p>\n\n\n<ol><li> \u795e\u7ecf\u7f51\u7edc\u7684\u66f4\u6df1\u5c42\u901a\u5e38\u6bd4\u524d\u9762\u7684\u5c42\u8ba1\u7b97\u66f4\u590d\u6742\u7684\u8f93\u5165\u7279\u5f81\u3002<\/li><li>\u795e\u7ecf\u7f51\u7edc\u7684\u524d\u9762\u7684\u5c42\u901a\u5e38\u6bd4\u66f4\u6df1\u5c42\u8ba1\u7b97\u8f93\u5165\u7684\u66f4\u590d\u6742\u7684\u7279\u6027\u3002<\/li><\/ol>\n\n\n<p>1\u3002<\/p>\n\n\n<p><strong>4. \u5411\u91cf\u5316\u5141\u8bb8\u60a8\u5728L\u5c42\u795e\u7ecf\u7f51\u7edc\u4e2d\u8ba1\u7b97\u524d\u5411\u4f20\u64ad\uff0c\u800c\u4e0d\u9700\u8981\u5728\u5c42(l = 1,2\uff0c\u2026\uff0cL)\u4e0a\u663e\u5f0f\u7684\u4f7f\u7528for-loop\uff08\u6216\u4efb\u4f55\u5176\u4ed6\u663e\u5f0f\u8fed\u4ee3\u5faa\u73af\uff09\uff0c\u6b63\u786e\u5417\uff1f <\/strong><\/p>\n\n\n<p>\u9519\u8bef\u3002 \u5c42\u95f4\u8ba1\u7b97\u4e2d\uff0c\u6211\u4eec\u4e0d\u80fd\u907f\u514dfor\u5faa\u73af\u8fed\u4ee3\u3002 <\/p>\n\n\n<p><strong>5. 
\u5047\u8bbe\u6211\u4eec\u5c06<\/strong><em><strong>n<\/strong><\/em><strong>[<\/strong><em><strong>l<\/strong><\/em><strong>]\u7684\u503c\u5b58\u50a8\u5728\u540d\u4e3alayers\u7684\u6570\u7ec4\u4e2d\uff0c\u5982\u4e0b\u6240\u793a\uff1alayer_dims = [n_x,4,3,2,1]\u3002 \u56e0\u6b64\uff0c\u7b2c1\u5c42\u6709\u56db\u4e2a\u9690\u85cf\u5355\u5143\uff0c\u7b2c2\u5c42\u6709\u4e09\u4e2a\u9690\u85cf\u5355\u5143\uff0c\u4f9d\u6b64\u7c7b\u63a8\u3002 \u60a8\u53ef\u4ee5\u4f7f\u7528\u54ea\u4e2afor\u5faa\u73af\u521d\u59cb\u5316\u6a21\u578b\u53c2\u6570\uff1f <\/strong><\/p>\n\n\n<pre class=\"wp-block-code\"><code>for(i in range(1, len(layer_dims))):\n     parameter['W' + str(i)] = np.random.randn(layers[i], layers[i - 1]) * 0.01\n     parameter['b' + str(i)] = np.zeros((layers[i], 1))<\/code><\/pre>\n\n\n<p><strong>6. \u5c42\u6570L\u4e3a4\uff0c\u9690\u85cf\u5c42\u6570\u4e3a3\u6b63\u786e\u4e48\uff1f <\/strong><\/p>\n\n\n<p>\u6b63\u786e\u3002<\/p>\n\n\n<p><strong>7. \u5728\u524d\u5411\u4f20\u64ad\u671f\u95f4\uff0c\u5728\u5c42<\/strong><em><strong>l<\/strong><\/em><strong>\u7684\u524d\u5411\u4f20\u64ad\u51fd\u6570\u4e2d\uff0c\u60a8\u9700\u8981\u77e5\u9053\u5c42<\/strong><em><strong>l<\/strong><\/em><strong>\u4e2d\u7684\u6fc0\u6d3b\u51fd\u6570\uff08Sigmoid\uff0ctanh\uff0cReLU\u7b49\uff09\u662f\u4ec0\u4e48\uff0c \u5728\u53cd\u5411\u4f20\u64ad\u671f\u95f4\uff0c\u76f8\u5e94\u7684\u53cd\u5411\u4f20\u64ad\u51fd\u6570\u4e5f\u9700\u8981\u77e5\u9053\u7b2c<\/strong><em><strong>l<\/strong><\/em><strong>\u5c42\u7684\u6fc0\u6d3b\u51fd\u6570\u662f\u4ec0\u4e48\uff0c\u56e0\u4e3a\u68af\u5ea6\u662f\u6839\u636e\u5b83\u6765\u8ba1\u7b97\u7684\uff0c\u6b63\u786e\u5417\uff1f 
<\/strong><\/p>\n\n\n<p>\u6b63\u786e\u3002\u53cd\u5411\u4f20\u64ad\u4e2d\u9700\u8981\u77e5\u9053\u4f7f\u7528\u54ea\u79cd\u6fc0\u6d3b\u51fd\u6570\u624d\u80fd\u6b63\u786e\u7684\u8ba1\u7b97\u5bfc\u51fd\u6570\u3002<\/p>\n\n\n<p><strong>8.\u4f7f\u7528\u6d45\u7f51\u7edc\u7535\u8def\u8ba1\u7b97\u51fd\u6570\u65f6\uff0c\u9700\u8981\u4e00\u4e2a\u5927\u7f51\u7edc\uff08\u6211\u4eec\u901a\u8fc7\u7f51\u7edc\u4e2d\u7684\u903b\u8f91\u95e8\u6570\u91cf\u6765\u5ea6\u91cf\u5927\u5c0f\uff09\uff0c\u4f46\u662f\u4f7f\u7528\u6df1\u7f51\u7edc\u7535\u8def\u6765\u8ba1\u7b97\u5b83\uff0c\u53ea\u9700\u8981\u4e00\u4e2a\u6307\u6570\u8f83\u5c0f\u7684\u7f51\u7edc\u3002\u771f\/\u5047\uff1f<\/strong><\/p>\n\n\n<p>\u6b63\u786e\u3002<\/p>\n\n\n<h2 class=\"wp-block-heading\">\u7f16\u7a0b\u4f5c\u4e1a<\/h2>\n\n\n<p>\u6b65\u9aa4\uff1a<\/p>\n\n\n<p><strong>1.\u521d\u59cb\u5316\u7f51\u7edc\u53c2\u6570<\/strong><\/p>\n\n\n<p><strong>2.\u524d\u5411\u4f20\u64ad<\/strong><\/p>\n\n\n<p>2.1 \u8ba1\u7b97\u4e00\u5c42\u4e2d\u7684\u7ebf\u6027\u6c42\u548c\u7684\u90e8\u5206<\/p>\n\n\n<p>2.2 \u8ba1\u7b97\u6fc0\u6d3b\u51fd\u6570\u7684\u90e8\u5206\uff08ReLU\u4f7f\u7528L-1\u6b21\uff0cSigmoid\u4f7f\u75281\u6b21\uff09<\/p>\n\n\n<p>2.3 \u7ed3\u5408\u7ebf\u6027\u6c42\u548c\u4e0e\u6fc0\u6d3b\u51fd\u6570<\/p>\n\n\n<p><strong>3.\u8ba1\u7b97\u8bef\u5dee<\/strong><\/p>\n\n\n<p><strong>4.\u53cd\u5411\u4f20\u64ad<\/strong><\/p>\n\n\n<p>4.1 \u7ebf\u6027\u90e8\u5206\u7684\u53cd\u5411\u4f20\u64ad\u516c\u5f0f<\/p>\n\n\n<p>4.2 \u6fc0\u6d3b\u51fd\u6570\u90e8\u5206\u7684\u53cd\u5411\u4f20\u64ad\u516c\u5f0f<\/p>\n\n\n<p>4.3 \u7ed3\u5408\u7ebf\u6027\u90e8\u5206\u4e0e\u6fc0\u6d3b\u51fd\u6570\u7684\u53cd\u5411\u4f20\u64ad\u516c\u5f0f<\/p>\n\n\n<p><strong>5.\u66f4\u65b0\u53c2\u6570<\/strong><\/p>\n\n\n<p>\u9996\u5148\u6211\u4eec\u5bfc\u5305\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">import numpy as np<br \/>import h5py<br \/>import matplotlib.pyplot as plt<br \/>from course_1_week_4 import testCases<br \/>from course_1_week_4.dnn_utils 
import sigmoid, sigmoid_backward, relu, relu_backward<br \/>from course_1_week_4 import lr_utils<\/pre>\n\n\n<p>\u5728\u8fd9\u4e00\u8bfe\uff0c\u5b9e\u73b0\u4e24\u79cd\u795e\u7ecf\u7f51\u7edc\uff0c\u4e00\u4e2a\u662f\u4e0a\u4e00\u5468\u7684\u53cc\u5c42\u795e\u7ecf\u7f51\u7edc\uff0c\u4e00\u4e2a\u662f\u8fd9\u4e00\u5468\u7684\u6df1\u5c42\u795e\u7ecf\u7f51\u7edc\uff0c\u56e0\u6b64\u6211\u4eec\u9996\u5148\u5199\u4e00\u4e2a\u5bf9\u53cc\u5c42\u795e\u7ecf\u7f51\u7edc\u7684\u53c2\u6570\u521d\u59cb\u5316\u65b9\u6cd5\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def init_two_layer_parameters(n_x, n_h, n_y):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u968f\u673a\u521d\u59cb\u5316\u53c2\u6570\uff08\u4e00\u4e2a\u4e24\u5c42\u7684\u795e\u7ecf\u7f51\u7edc\uff09<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> n_x: <\/em><em>\u8f93\u5165\u5c42\u7ed3\u70b9\u4e2a\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> n_h: <\/em><em>\u9690\u85cf\u5c42\u7ed3\u70b9\u4e2a\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> n_y: <\/em><em>\u8f93\u51fa\u5c42\u7ed3\u70b9\u4e2a\u6570<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>W1 = np.random.randn(n_h, n_x) * 0.01<br \/>    b1 = np.zeros((n_h, 1))<br \/>    W2 = np.random.randn(n_y, n_h) * 0.01<br \/>    b2 = np.zeros((n_y, 1))<br \/><br \/>    parameters = {<br \/>        'W1': W1,<br \/>        'b1': b1,<br \/>        'W2': W2,<br \/>        'b2': b2<br \/>    }<br \/>    return parameters<\/pre>\n\n\n<p>\u540c\u6837\u5728\u5199\u4e00\u4e2a\u6df1\u5c42\u795e\u7ecf\u7f51\u7edc\u7684\u53c2\u6570\u521d\u59cb\u5316\uff08\u5f53\u7136\uff0c\u6df1\u5c42\u53ef\u4ee5\u521d\u59cb\u5316\u4e24\u5c42\u7684\uff09\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def init_deep_layers_parameters(layers_dim):<br \/>    <em>\"\"\"<br \/><\/em><em>    
<\/em><em>\u521d\u59cb\u5316\u591a\u5c42\u795e\u7ecf\u7f51\u7edc\u53c2\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> layers_dim: <\/em><em>\u6bcf\u5c42\u8282\u70b9\u6570\u7684\u5217\u8868<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>parameters = {}<br \/>    for i in range(1, len(layers_dim)):<br \/>        parameters['W' + str(i)] = np.random.randn(layers_dim[i], layers_dim[i - 1]) * 0.01<br \/>        parameters['b' + str(i)] = np.zeros((layers_dim[i], 1))<br \/><br \/>    return parameters<\/pre>\n\n\n<p>\u5b8c\u6210\u4e86\u53c2\u6570\u7684\u521d\u59cb\u5316\u6211\u4eec\u5c31\u5f00\u59cb\u8fdb\u884c\u524d\u5411\u4f20\u64ad\u76f8\u5173\u7684\u4ee3\u7801\u3002<\/p>\n\n\n<p>\u9996\u5148\u662f\u7ebf\u6027\u4f20\u64ad\u7684\u65b9\u6cd5\uff0c\u53ea\u8ba1\u7b97wx+b\u800c\u4e0d\u8fdb\u884c\u6fc0\u6d3b\uff0c\u540c\u65f6\u5c06A,W,b\u4f5c\u4e3a\u7f13\u5b58\u8fd4\u56de<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def linear_forward(A, W, b):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u524d\u5411\u4f20\u64ad\uff08\u53ea\u7b97<\/em><em>wx+b<\/em><em>\uff0c\u4e0d\u6fc0\u6d3b\uff09<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> A:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> W:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> b:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>Z = np.dot(W, A) + b<br \/>    cache = (A, W, b)<br \/>    return Z, 
cache<\/pre>\n\n\n<p>\u7136\u540e\u662f\u6fc0\u6d3b\u51fd\u6570\uff0c\u5728\u8fd9\u91cc\u8fdb\u884c\u5206\u7c7b\uff0c\u56e0\u4e3a\u6211\u4eec\u4e8c\u5206\u7c7b\u7684\u795e\u7ecf\u7f51\u7edc\u6700\u540e\u4e00\u5c42\u4f7f\u7528\u7684\u662fsigmoid\u6fc0\u6d3b\uff0c\u800c\u5728\u8fd9\u4e4b\u524d\u6211\u4eec\u90fd\u5728\u4f7f\u7528ReLU\u8fdb\u884c\u6fc0\u6d3b\u3002\u540c\u65f6\u5c06\u6fc0\u6d3b\u524d\u7684\u7ed3\u679c\u548cx\u3001w\u3001b\u4f5c\u4e3a\u7f13\u5b58\u8fd4\u56de\uff08x\u56e0\u4e3a\u662f\u5f53\u524d\u5c42\u7684\u7ed3\u679c\uff0c\u4e0b\u4e00\u5c42\u7684\u8f93\u5165\uff0c\u5728\u8ba1\u7b97\u4e0b\u4e00\u5c42\u7684\u65f6\u5019\u548c\u53cd\u5411\u4f20\u64ad\u65f6\u9700\u8981\u7528\u5230\uff0c\u56e0\u6b64\u7f13\u5b58\u3002\u53e6\u5916\u4e24\u4e2a\u672c\u8eab\u5c31\u662f\u53c2\u6570\uff0c\u5fc5\u7136\u7f13\u5b58\uff09\u3002<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def linear_activation_forward(A_prev, W, b, activation):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u524d\u5411\u4f20\u64ad\u7684\u6fc0\u6d3b<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> A_prev:<\/em><em>\u4e0a\u4e00\u5c42\u7684\u6fc0\u6d3b\u503c<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> W:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> b:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> activation: <\/em><em>\u6fc0\u6d3b\u51fd\u6570<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>assert activation in ['sigmoid', 'relu']<br \/>    Z, linear_cache = linear_forward(A_prev, W, b)<br \/>    if activation is 'sigmoid':<br \/>        A, activation_cache = sigmoid(Z)<br \/>    else:<br \/>        A, activation_cache = relu(Z)<br \/><br \/>    #    \u8fdb\u884cxw+b(\u6309\u987a\u5e8f)\u7684\u5404\u4e2a\u53c2\u6570  # \u8fdb\u884c\u6fc0\u6d3b\u7684Z<br \/>    cache = linear_cache, activation_cache<br \/>    return A, 
cache<\/pre>\n\n\n<p>\u5230\u8fd9\u91cc\uff0c\u6211\u4eec\u53ef\u4ee5\u5b9e\u73b0\u4e00\u6b21\u795e\u7ecf\u7f51\u7edc\u7684\u524d\u5411\u4f20\u64ad\uff0c\u90a3\u6211\u4eec\u9700\u8981\u5199\u6df1\u5c42\u7684\u4f20\u64ad\uff0c\u4e5f\u5c31\u662f\u8fdb\u884cn\u6b21\u5355\u5c42\u7684\u4f20\u64ad\u8c03\u7528\uff08\u8fd9\u91cc\u591a\u7f13\u5b58\u4e86\u6fc0\u6d3b\u524d\u7684z\uff0c\u800c\u4e4b\u524d\u4f7f\u7528sigmoid\u65f6\u6ca1\u6709\u7f13\u5b58\uff0c\u4e00\u4e2a\u539f\u56e0\u662fsigmoid\u51fd\u6570\u7684\u5bfc\u6570\u4f9d\u7136\u6709sigmoid\u51fd\u6570\uff0c\u53ef\u4ee5\u7528\u6fc0\u6d3b\u540e\u7684A\u8fdb\u884c\u8ba1\u7b97\uff0c\u800cReLU\u5374\u6ca1\u6709\uff0c\u56e0\u6b64\u9700\u8981Z\u6765\u5145\u5f53\u590d\u5408\u51fd\u6570\u6c42\u5bfc\u540e\u7684x\u9879\uff09<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def L_model_forward(X, parameters):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u591a\u5c42\u6a21\u578b\u7684\u4f20\u64ad<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> X:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> parameters:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>caches = []<br \/>    A = X<br \/>    L = len(parameters) \/\/ 2<br \/>    for i in range(1, L):<br \/>        A, cache = linear_activation_forward(A, parameters['W' + str(i)], parameters['b' + str(i)], 'relu')<br \/>        caches.append(cache)<br \/>    AL, cahce = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], 'sigmoid')<br \/>    # caches \u4e2d\u4e3a \u6bcf\u4e00\u6b21\u4f20\u64ad\u8fc7\u7a0b\u7684\uff08\u8fdb\u884cxw+b\u7684\u5404\u4e2a\u53c2\u6570\uff0c\u8fdb\u884c\u6fc0\u6d3b\u7684Z\uff09<br \/>    caches.append(cahce)<br \/><br \/>    return AL, caches<\/pre>\n\n\n<p>\u6211\u4eec\u8fdb\u884c\u5b8c\u4e86\u524d\u5411\u4f20\u64ad\u7136\u540e\u8ba1\u7b97\u4ee3\u4ef7<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def compute_cost(AL, 
Y):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u8ba1\u7b97\u6210\u672c<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> AL:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> Y:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>m = Y.shape[1]<br \/>    cost = -np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1 - AL), 1 - Y)) \/ m<br \/><br \/>    # \u77e9\u9635\u8f6c\u6570\u7ec4(\u5355\u4e2a\u6570\u5b57)<br \/>    cost = np.squeeze(cost)<br \/>    return cost<br \/><\/pre>\n\n\n<p>\u7136\u540e\u8fdb\u884c\u53cd\u5411\u4f20\u64ad\uff0c\u9996\u5148\u662f\u8ba1\u7b97\u8bef\u5dee\uff0c\u5f53\u7136\u4e5f\u662f\u53ea\u8ba1\u7b97\u4e00\u8f6e\u3002<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def linear_backward(dZ, cache):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u53cd\u5411\u4f20\u64ad<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> dZ:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> cache:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>A_prev, W, b = cache<br \/>    m = A_prev.shape[1]<br \/>    dW = np.dot(dZ, A_prev.T) \/ m<br \/>    db = np.sum(dZ, axis=1, keepdims=True) \/ m<br \/>    dA_prev = np.dot(W.T, dZ)<br \/><br \/>    return dA_prev, dW, db<\/pre>\n\n\n<p>\u7136\u540e\u662f\u6fc0\u6d3b\u51fd\u6570\u90e8\u5206\uff0c\u4e00\u6837\uff0c\u6839\u636e\u524d\u5411\u4f20\u64ad\u65f6\u4f7f\u7528\u7684\u4e0d\u540c\u6fc0\u6d3b\u51fd\u6570\u8fdb\u884c\u6fc0\u6d3b\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def linear_activation_backward(dA, cache, activation='relu'):\n    <em>\"\"\"\n    \u53cd\u5411\u4f20\u64ad\u7684\u6fc0\u6d3b\n    <\/em><strong><em>:param<\/em><\/strong><em> dA:\n    <\/em><strong><em>:param<\/em><\/strong><em> cache:\n    <\/em><strong><em>:param<\/em><\/strong><em> activation:\n    
<\/em><strong><em>:return<\/em><\/strong><em>:\n    \"\"\"\n    <\/em>assert activation in ['sigmoid', 'relu']\n    linear_cache, activation_cache = cache\n    if activation is \"relu\":\n        dZ = relu_backward(dA, activation_cache)\n        dA_prev, dW, db = linear_backward(dZ, linear_cache)\n    elif activation is \"sigmoid\":\n        dZ = sigmoid_backward(dA, activation_cache)\n        dA_prev, dW, db = linear_backward(dZ, linear_cache)\n    return dA_prev, dW, db<\/pre>\n\n\n<p>\u7136\u540e\u662fL\u5c42\u8fdb\u884c\u53cd\u5411\u4f20\u64ad\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def L_model_backward(AL, Y, caches):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u591a\u5c42\u7f51\u7edc\u7684\u53cd\u5411\u4f20\u64ad<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> AL:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> Y:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> caches:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>grads = {}<br \/>    L = len(caches)<br \/>    Y = Y.reshape(AL.shape)<br \/>    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))<br \/><br \/>    current_cache = caches[-1]<br \/>    grads['dA' + str(L)], grads['dW' + str(L)], grads['db' + str(L)] = linear_activation_backward(dAL, current_cache,<br \/>                                                                                                  'sigmoid')<br \/>    for l in range(L - 1):<br \/>        current_cache = caches[l]<br \/>        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads['dA' + str(l + 2)], current_cache, 'relu')<br \/>        grads['dA' + str(l + 1)] = dA_prev_temp<br \/>        grads['dW' + str(l + 1)] = dW_temp<br \/>        grads['db' + str(l + 1)] = db_temp<br \/><br \/>    return grads<\/pre>\n\n\n<p>\u66f4\u65b0\u53c2\u6570\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def 
update_parameters(parameters, grads, learning_rate):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u66f4\u65b0\u53c2\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> parameters:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> grads:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> learning_rate:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>L = len(parameters) \/\/ 2<br \/>    for i in range(L):<br \/>        parameters['W' + str(i + 1)] = parameters['W' + str(i + 1)] - learning_rate * grads['dW' + str(i + 1)]<br \/>        parameters['b' + str(i + 1)] = parameters['b' + str(i + 1)] - learning_rate * grads['db' + str(i + 1)]<br \/><br \/>    return parameters<\/pre>\n\n\n<p>\u9884\u6d4b\u7ed3\u679c\u7684\u9884\u6d4b\u51fd\u6570\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def predict(X, y, parameters):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u9884\u6d4b\u7ed3\u679c<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> X:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> y:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> parameters:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>m = X.shape[1]<br \/>    p = np.zeros((1, m))<br \/><br \/>    # \u6839\u636e\u53c2\u6570\u524d\u5411\u4f20\u64ad<br \/>    probas, caches = L_model_forward(X, parameters)<br \/><br \/>    for i in range(0, probas.shape[1]):<br \/>        if probas[0, i] &gt; 0.5:<br \/>            p[0, i] = 1<br \/>        else:<br \/>            p[0, i] = 0<br \/><br \/>    print(\"\u51c6\u786e\u5ea6\u4e3a: \" + str(float(np.sum((p == y)) \/ m)))<br \/>    return 
p<\/pre>\n\n\n<p>\u7136\u540e\u662f\u795e\u7ecf\u7f51\u7edc\u6a21\u578b\uff0c\u5c06\u4e0a\u9762\u6240\u6709\u7684\u65b9\u6cd5\u4e32\u8d77\u6765\u4f7f\u7528\uff08\u65b9\u6cd5\u540d\u662f\u4e24\u5c42\u7f51\u7edc\uff0c\u4f46\u662f\u591a\u5c42\u5b9e\u73b0\u4f9d\u7136\u5728\u91cc\u9762\uff0c\u53ea\u8981\u5c06\u6ce8\u91ca\u89e3\u5f00\uff0c\u6539\u53d8laters_dims\u7684\u503c\u5c31\u53ef\u4ee5\u53d8\u6210\u6df1\u5c42\u7f51\u7edc\uff09\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def two_layer_model(X, Y, layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False, isPlot=True):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u5b9e\u73b0\u4e00\u4e2a\u4e24\u5c42\u7684\u795e\u7ecf\u7f51\u7edc<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> X:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> Y:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> layers_dims:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> learning_rate:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> num_iteration:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> print_cost:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> isPlot:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>np.random.seed(1)<br \/><br \/>    grads = {}<br \/>    costs = []<br \/>    n_x, n_h, n_y = layers_dims<br \/><br \/>    parameters = init_two_layer_parameters(n_x, n_h, n_y)<br \/>    # parameters = init_deep_layers_parameters(layers_dims)<br \/><br \/>    W1 = parameters['W1']<br \/>    W2 = parameters['W2']<br \/>    b1 = parameters['b1']<br \/>    b2 = parameters['b2']<br \/><br \/>    for i in range(num_iterations):<br \/>        # \u524d\u5411\u4f20\u64ad<br \/>        A1, cache1 = linear_activation_forward(X, W1, b1, 'relu')<br \/>        A2, cache2 = linear_activation_forward(A1, W2, b2, 'sigmoid')<br \/>        # A2, 
caches = L_model_forward(X, parameters)<br \/><br \/>        cost = compute_cost(A2, Y)<br \/><br \/>        # \u53cd\u5411\u4f20\u64ad<br \/>        dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))<br \/>        dA1, dW2, db2 = linear_activation_backward(dA2, cache2, \"sigmoid\")<br \/>        dA0, dW1, db1 = linear_activation_backward(dA1, cache1, \"relu\")<br \/>        # grads = L_model_backward(A2, Y, caches)<br \/><br \/>        # \u5411\u540e\u4f20\u64ad\u5b8c\u6210\u540e\u7684\u6570\u636e\u4fdd\u5b58\u5230grads<br \/>        grads[\"dW1\"] = dW1<br \/>        grads[\"db1\"] = db1<br \/>        grads[\"dW2\"] = dW2<br \/>        grads[\"db2\"] = db2<br \/><br \/>        # \u66f4\u65b0\u53c2\u6570<br \/>        parameters = update_parameters(parameters, grads, learning_rate)<br \/>        W1 = parameters[\"W1\"]<br \/>        b1 = parameters[\"b1\"]<br \/>        W2 = parameters[\"W2\"]<br \/>        b2 = parameters[\"b2\"]<br \/><br \/>        # \u6253\u5370\u6210\u672c\u503c\uff0c\u5982\u679cprint_cost=False\u5219\u5ffd\u7565<br \/>        if i % 100 == 0:<br \/>            # \u8bb0\u5f55\u6210\u672c<br \/>            costs.append(cost)<br \/>            # \u662f\u5426\u6253\u5370\u6210\u672c\u503c<br \/>            if print_cost:<br \/>                print(\"\u7b2c\", i, \"\u6b21\u8fed\u4ee3\uff0c\u6210\u672c\u503c\u4e3a\uff1a\", np.squeeze(cost))<br \/>    # \u8fed\u4ee3\u5b8c\u6210\uff0c\u6839\u636e\u6761\u4ef6\u7ed8\u5236\u56fe<br \/>    if isPlot:<br \/>        plt.plot(np.squeeze(costs))<br \/>        plt.ylabel('cost')<br \/>        plt.xlabel('iterations (per tens)')<br \/>        plt.title(\"Learning rate =\" + str(learning_rate))<br \/>        plt.show()<br \/>    # \u8fd4\u56deparameters<br \/>    return parameters<\/pre>\n\n\n<p>\u6700\u540e\u8bfb\u53d6\u6570\u636e\u96c6\u4e0e\u6807\u51c6\u5316\uff0c\u8c03\u7528\u7f51\u7edc\u6a21\u578b\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\"># \u9884\u5904\u7406\u6570\u636e\u96c6<br 
\/>train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = lr_utils.load_dataset()<br \/><br \/>train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T<br \/>test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T<br \/><br \/># RGB\u6807\u51c6\u5316<br \/>train_x = train_x_flatten \/ 255<br \/>train_y = train_set_y<br \/>test_x = test_x_flatten \/ 255<br \/>test_y = test_set_y<br \/><br \/>n_x = 12288<br \/>n_h = 7<br \/>n_y = 1<br \/>layers_dims = (n_x, n_h, n_y)<br \/><br \/>parameters = two_layer_model(train_x, train_set_y, layers_dims=(n_x, n_h, n_y), num_iterations=2500, print_cost=True,<br \/>                             isPlot=True)<\/pre>\n\n\n<p>\u7ed3\u679c\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">\u7b2c 2300 \u6b21\u8fed\u4ee3\uff0c\u6210\u672c\u503c\u4e3a\uff1a 0.05336140348560556<br \/>\u7b2c 2400 \u6b21\u8fed\u4ee3\uff0c\u6210\u672c\u503c\u4e3a\uff1a 0.048554785628770185<br \/>\u51c6\u786e\u5ea6\u4e3a: 1.0<br \/>\u51c6\u786e\u5ea6\u4e3a: 0.72<\/pre>\n\n\n<p>\u751f\u6210\u56fe\u50cf\uff1a<\/p>\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter\"><img loading=\"lazy\" decoding=\"async\" width=\"379\" height=\"282\" src=\"\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-37.png\" alt=\"\" class=\"wp-image-2908\" srcset=\"http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-37.png 379w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-37-300x223.png 300w\" sizes=\"(max-width: 379px) 100vw, 379px\" \/><\/figure><\/div>\n\n\n<p>\u4e0a\u9762\u662f\u4f7f\u7528\u53cc\u5c42\u53c2\u6570\u521d\u59cb\u5316\uff0c\u6309\u8bf4\u4f7f\u7528\u591a\u5c42\u53c2\u6570\u521d\u59cb\u5316\u5bf9\u7ed3\u679c\u4e0d\u4f1a\u6784\u6210\u5f88\u5927\u5f71\u54cd\uff0c\u4f46\u662f\u5b9e\u9645\u4e0a\u7ed3\u679c\u5982\u4e0b\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">\u7b2c 2300 \u6b21\u8fed\u4ee3\uff0c\u6210\u672c\u503c\u4e3a\uff1a 0.14781357997051983<br \/>\u7b2c 2400 
\u6b21\u8fed\u4ee3\uff0c\u6210\u672c\u503c\u4e3a\uff1a 0.12935258942424563<br \/>\u51c6\u786e\u5ea6\u4e3a: 1.0<br \/>\u51c6\u786e\u5ea6\u4e3a: 0.74<\/pre>\n\n\n<p>\u867d\u7136\u4ee3\u4ef7\u53d8\u9ad8\u4e86\uff0c\u4f46\u662f\u51c6\u786e\u5ea6\u5374\u4e0a\u5347\u4e86\u3002<\/p>\n\n\n<p>\u6211\u4eec\u67e5\u770b\u4e00\u4e0b\u8bc6\u522b\u5931\u8d25\u7684\u56fe\u7247\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\">def print_mislabeled_images(classes, X, y, p):<br \/>    <em>\"\"\"<br \/><\/em><em>   <\/em><em>\u7ed8\u5236\u9884\u6d4b\u548c\u5b9e\u9645\u4e0d\u540c\u7684\u56fe\u50cf\u3002<\/em><em><br \/><\/em><em>       X - <\/em><em>\u6570\u636e\u96c6<\/em><em><br \/><\/em><em>       y - <\/em><em>\u5b9e\u9645\u7684\u6807\u7b7e<\/em><em><br \/><\/em><em>       p - <\/em><em>\u9884\u6d4b<\/em><em><br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>a = p + y<br \/>    mislabeled_indices = np.asarray(np.where(a == 1))<br \/>    plt.rcParams['figure.figsize'] = (40.0, 40.0)  # set default size of plots<br \/>    num_images = len(mislabeled_indices[0])<br \/>    for i in range(num_images):<br \/>        index = mislabeled_indices[1][i]<br \/><br \/>        plt.subplot(2, num_images, i + 1)<br \/>        plt.imshow(X[:, index].reshape(64, 64, 3), interpolation='nearest')<br \/>        plt.axis('off')<br \/>        plt.title(<br \/>            \"Prediction: \" + classes[int(p[0, index])].decode(\"utf-8\") + \" \\n Class: \" + classes[y[0, index]].decode(<br \/>                \"utf-8\"))<br \/>    plt.show()<br \/><br \/><br \/>print_mislabeled_images(classes, test_x, test_y, pred_test)<br \/><\/pre>\n\n\n<p>\u53d1\u73b0\u786e\u5b9e\u6709\u4e00\u4e9b\u4e0d\u662f\u732b\uff0c\u4f46\u662f\u6709\u4e00\u4e9b\u59ff\u52bf\u5343\u5947\u767e\u602a\u7684\u732b\u4f9d\u7136\u6ca1\u6709\u529e\u6cd5\u8fdb\u884c\u8bc6\u522b\u3002<\/p>\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter\"><img loading=\"lazy\" decoding=\"async\" width=\"511\" height=\"131\" 
src=\"\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-38.png\" alt=\"\" class=\"wp-image-2909\" srcset=\"http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-38.png 511w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-38-300x77.png 300w\" sizes=\"(max-width: 511px) 100vw, 511px\" \/><\/figure><\/div>\n\n\n<div class=\"wp-block-image\"><figure class=\"aligncenter\"><img loading=\"lazy\" decoding=\"async\" width=\"545\" height=\"122\" src=\"\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-39.png\" alt=\"\" class=\"wp-image-2910\" srcset=\"http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-39.png 545w, http:\/\/www.sniper97.cn\/wp-content\/uploads\/2020\/02\/\u56fe\u7247-39-300x67.png 300w\" sizes=\"(max-width: 545px) 100vw, 545px\" \/><\/figure><\/div>\n\n\n<p>\u5b8c\u6574\u4ee3\u7801\uff1a<\/p>\n\n\n<pre class=\"wp-block-preformatted\"># -*- coding:utf-8 -*-<br \/><br \/><em>\"\"\"<br \/><\/em><em>      \u250f\u251b \u253b\u2501\u2501\u2501\u2501\u2501\u251b \u253b\u2513<br \/><\/em><em>      \u2503<\/em><em>\u3000\u3000\u3000\u3000\u3000\u3000<\/em><em> \u2503<br \/><\/em><em>      \u2503<\/em><em>\u3000\u3000\u3000<\/em><em>\u2501<\/em><em>\u3000\u3000\u3000<\/em><em>\u2503<br \/><\/em><em>      \u2503<\/em><em>\u3000<\/em><em>\u2533\u251b<\/em><em>\u3000<\/em><em>  \u2517\u2533<\/em><em>\u3000<\/em><em>\u2503<br \/><\/em><em>      \u2503<\/em><em>\u3000\u3000\u3000\u3000\u3000\u3000<\/em><em> \u2503<br \/><\/em><em>      \u2503<\/em><em>\u3000\u3000\u3000<\/em><em>\u253b<\/em><em>\u3000\u3000\u3000<\/em><em>\u2503<br \/><\/em><em>      \u2503<\/em><em>\u3000\u3000\u3000\u3000\u3000\u3000<\/em><em> \u2503<br \/><\/em><em>      \u2517\u2501\u2513<\/em><em>\u3000\u3000\u3000<\/em><em>\u250f\u2501\u2501\u2501\u251b<br \/><\/em><em>        \u2503<\/em><em>\u3000\u3000\u3000<\/em><em>\u2503   <\/em><em>\u795e\u517d\u4fdd\u4f51<\/em><em><br \/><\/em><em>        \u2503<\/em><em>\u3000\u3000\u3000<\/em><em>\u2503   
<\/em><em>\u4ee3\u7801\u65e0<\/em><em>BUG<\/em><em>\uff01<\/em><em><br \/><\/em><em>        \u2503<\/em><em>\u3000\u3000\u3000<\/em><em>\u2517\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2513<br \/><\/em><em>        \u2503<\/em><em>\u3000\u3000\u3000\u3000\u3000\u3000\u3000<\/em><em>    \u2523\u2513<br \/><\/em><em>        \u2503<\/em><em>\u3000\u3000\u3000\u3000<\/em><em>         \u250f\u251b<br \/><\/em><em>        \u2517\u2501\u2513 \u2513 \u250f\u2501\u2501\u2501\u2533 \u2513 \u250f\u2501\u251b<br \/><\/em><em>          \u2503 \u252b \u252b   \u2503 \u252b \u252b<br \/><\/em><em>          \u2517\u2501\u253b\u2501\u251b   \u2517\u2501\u253b\u2501\u251b<br \/><\/em><em>\"\"\"<br \/><\/em><em><br \/><\/em>import numpy as np<br \/>import h5py<br \/>import matplotlib.pyplot as plt<br \/>from course_1_week_4 import testCases<br \/>from course_1_week_4.dnn_utils import sigmoid, sigmoid_backward, relu, relu_backward<br \/>from course_1_week_4 import lr_utils<br \/><br \/># \u6307\u5b9a\u968f\u673a\u6570\u79cd\u5b50<br \/>np.random.seed(1)<br \/><br \/><br \/>def init_two_layer_parameters(n_x, n_h, n_y):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u968f\u673a\u521d\u59cb\u5316\u53c2\u6570\uff08\u4e00\u4e2a\u4e24\u5c42\u7684\u795e\u7ecf\u7f51\u7edc\uff09<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> n_x: <\/em><em>\u8f93\u5165\u5c42\u7ed3\u70b9\u4e2a\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> n_h: <\/em><em>\u9690\u85cf\u5c42\u7ed3\u70b9\u4e2a\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> n_y: <\/em><em>\u8f93\u51fa\u5c42\u7ed3\u70b9\u4e2a\u6570<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>W1 = np.random.randn(n_h, n_x) * 0.01<br \/>    b1 = np.zeros((n_h, 1))<br \/>    W2 = np.random.randn(n_y, n_h) * 0.01<br \/>    b2 = np.zeros((n_y, 1))<br \/><br \/>    parameters = {<br \/>        'W1': W1,<br \/>       
 'b1': b1,<br \/>        'W2': W2,<br \/>        'b2': b2<br \/>    }<br \/>    return parameters<br \/><br \/><br \/>def init_deep_layers_parameters(layers_dim):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u521d\u59cb\u5316\u591a\u5c42\u795e\u7ecf\u7f51\u7edc\u53c2\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> layers_dim: <\/em><em>\u6bcf\u5c42\u8282\u70b9\u6570\u7684\u5217\u8868<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>parameters = {}<br \/>    for i in range(1, len(layers_dim)):<br \/>        parameters['W' + str(i)] = np.random.randn(layers_dim[i], layers_dim[i - 1]) * 0.01<br \/>        parameters['b' + str(i)] = np.zeros((layers_dim[i], 1))<br \/><br \/>    return parameters<br \/><br \/><br \/>def linear_forward(A, W, b):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u524d\u5411\u4f20\u64ad\uff08\u53ea\u7b97<\/em><em>wx+b<\/em><em>\uff0c\u4e0d\u6fc0\u6d3b\uff09<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> A:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> W:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> b:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>Z = np.dot(W, A) + b<br \/>    cache = (A, W, b)<br \/>    return Z, cache<br \/><br \/><br \/>def linear_activation_forward(A_prev, W, b, activation):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u524d\u5411\u4f20\u64ad\u7684\u6fc0\u6d3b<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> A_prev:<\/em><em>\u4e0a\u4e00\u5c42\u7684\u6fc0\u6d3b\u503c<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> W:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> b:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> activation: <\/em><em>\u6fc0\u6d3b\u51fd\u6570<br \/><\/em><em>    
<\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>assert activation in ['sigmoid', 'relu']<br \/>    Z, linear_cache = linear_forward(A_prev, W, b)<br \/>    if activation is 'sigmoid':<br \/>        A, activation_cache = sigmoid(Z)<br \/>    else:<br \/>        A, activation_cache = relu(Z)<br \/><br \/>    #    \u8fdb\u884cxw+b(\u6309\u987a\u5e8f)\u7684\u5404\u4e2a\u53c2\u6570  # \u8fdb\u884c\u6fc0\u6d3b\u7684Z<br \/>    cache = linear_cache, activation_cache<br \/>    return A, cache<br \/><br \/><br \/>def L_model_forward(X, parameters):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u591a\u5c42\u6a21\u578b\u7684\u4f20\u64ad<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> X:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> parameters:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>caches = []<br \/>    A = X<br \/>    L = len(parameters) \/\/ 2<br \/>    for i in range(1, L):<br \/>        A, cache = linear_activation_forward(A, parameters['W' + str(i)], parameters['b' + str(i)], 'relu')<br \/>        caches.append(cache)<br \/>    AL, cache = linear_activation_forward(A, parameters['W' + str(L)], parameters['b' + str(L)], 'sigmoid')<br \/>    # caches \u4e2d\u4e3a \u6bcf\u4e00\u6b21\u4f20\u64ad\u8fc7\u7a0b\u7684\uff08\u8fdb\u884cxw+b\u7684\u5404\u4e2a\u53c2\u6570\uff0c\u8fdb\u884c\u6fc0\u6d3b\u7684Z\uff09<br \/>    caches.append(cache)<br \/><br \/>    return AL, caches<br \/><br \/><br \/>def compute_cost(AL, Y):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u8ba1\u7b97\u6210\u672c<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> AL:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> Y:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>m = Y.shape[1]<br \/>    cost = 
-np.sum(np.multiply(np.log(AL), Y) + np.multiply(np.log(1 - AL), 1 - Y)) \/ m<br \/><br \/>    # \u77e9\u9635\u8f6c\u6570\u7ec4(\u5355\u4e2a\u6570\u5b57)<br \/>    cost = np.squeeze(cost)<br \/>    return cost<br \/><br \/><br \/>def linear_backward(dZ, cache):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u53cd\u5411\u4f20\u64ad<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> dZ:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> cache:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>A_prev, W, b = cache<br \/>    m = A_prev.shape[1]<br \/>    dW = np.dot(dZ, A_prev.T) \/ m<br \/>    db = np.sum(dZ, axis=1, keepdims=True) \/ m<br \/>    dA_prev = np.dot(W.T, dZ)<br \/><br \/>    return dA_prev, dW, db<br \/><br \/><br \/>def linear_activation_backward(dA, cache, activation='relu'):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u53cd\u5411\u4f20\u64ad\u7684\u6fc0\u6d3b<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> dA:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> cache:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> activation:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>assert activation in ['sigmoid', 'relu']<br \/>    linear_cache, activation_cache = cache<br \/>    if activation is \"relu\":<br \/>        dZ = relu_backward(dA, activation_cache)<br \/>        dA_prev, dW, db = linear_backward(dZ, linear_cache)<br \/>    elif activation is \"sigmoid\":<br \/>        dZ = sigmoid_backward(dA, activation_cache)<br \/>        dA_prev, dW, db = linear_backward(dZ, linear_cache)<br \/><br \/>    return dA_prev, dW, db<br \/><br \/><br \/>def L_model_backward(AL, Y, caches):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u591a\u5c42\u7f51\u7edc\u7684\u53cd\u5411\u4f20\u64ad<br \/><\/em><em>    
<\/em><strong><em>:param<\/em><\/strong><em> AL:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> Y:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> caches:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>grads = {}<br \/>    L = len(caches)<br \/>    Y = Y.reshape(AL.shape)<br \/>    dAL = -(np.divide(Y, AL) - np.divide(1 - Y, 1 - AL))<br \/><br \/>    current_cache = caches[-1]<br \/>    grads['dA' + str(L)], grads['dW' + str(L)], grads['db' + str(L)] = linear_activation_backward(dAL, current_cache,<br \/>                                                                                                  'sigmoid')<br \/>    for l in reversed(range(L-1)):<br \/>        current_cache = caches[l]<br \/>        dA_prev_temp, dW_temp, db_temp = linear_activation_backward(grads['dA' + str(l + 2)], current_cache, 'relu')<br \/>        grads['dA' + str(l + 1)] = dA_prev_temp<br \/>        grads['dW' + str(l + 1)] = dW_temp<br \/>        grads['db' + str(l + 1)] = db_temp<br \/><br \/>    return grads<br \/><br \/><br \/>def update_parameters(parameters, grads, learning_rate):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u66f4\u65b0\u53c2\u6570<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> parameters:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> grads:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> learning_rate:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>L = len(parameters) \/\/ 2<br \/>    for i in range(L):<br \/>        parameters['W' + str(i + 1)] = parameters['W' + str(i + 1)] - learning_rate * grads['dW' + str(i + 1)]<br \/>        parameters['b' + str(i + 1)] = parameters['b' + str(i + 1)] - learning_rate * grads['db' + str(i + 1)]<br \/><br \/>    return parameters<br \/><br \/><br \/>def two_layer_model(X, Y, 
layers_dims, learning_rate=0.0075, num_iterations=3000, print_cost=False, isPlot=True):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u5b9e\u73b0\u4e00\u4e2a\u4e24\u5c42\u7684\u795e\u7ecf\u7f51\u7edc<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> X:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> Y:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> layers_dims:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> learning_rate:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> num_iteration:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> print_cost:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> isPlot:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>np.random.seed(1)<br \/><br \/>    grads = {}<br \/>    costs = []<br \/>    # n_x, n_h, n_y = layers_dims<br \/><br \/>    # parameters = init_two_layer_parameters(n_x, n_h, n_y)<br \/>    parameters = init_deep_layers_parameters(layers_dims)<br \/><br \/>    W1 = parameters['W1']<br \/>    W2 = parameters['W2']<br \/>    b1 = parameters['b1']<br \/>    b2 = parameters['b2']<br \/><br \/>    for i in range(num_iterations):<br \/>        # \u524d\u5411\u4f20\u64ad<br \/>        # A1, cache1 = linear_activation_forward(X, W1, b1, 'relu')<br \/>        # A2, cache2 = linear_activation_forward(A1, W2, b2, 'sigmoid')<br \/>        AL, caches = L_model_forward(X, parameters)<br \/><br \/>        cost = compute_cost(AL, Y)<br \/><br \/>        # \u53cd\u5411\u4f20\u64ad<br \/>        # dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))<br \/>        # dA1, dW2, db2 = linear_activation_backward(dA2, cache2, \"sigmoid\")<br \/>        # dA0, dW1, db1 = linear_activation_backward(dA1, cache1, \"relu\")<br \/>        grads = L_model_backward(AL, Y, caches)<br \/><br \/>        # 
\u5411\u540e\u4f20\u64ad\u5b8c\u6210\u540e\u7684\u6570\u636e\u4fdd\u5b58\u5230grads<br \/>        # grads[\"dW1\"] = dW1<br \/>        # grads[\"db1\"] = db1<br \/>        # grads[\"dW2\"] = dW2<br \/>        # grads[\"db2\"] = db2<br \/><br \/>        # \u66f4\u65b0\u53c2\u6570<br \/>        parameters = update_parameters(parameters, grads, learning_rate)<br \/>        # W1 = parameters[\"W1\"]<br \/>        # b1 = parameters[\"b1\"]<br \/>        # W2 = parameters[\"W2\"]<br \/>        # b2 = parameters[\"b2\"]<br \/><br \/>        # \u6253\u5370\u6210\u672c\u503c\uff0c\u5982\u679cprint_cost=False\u5219\u5ffd\u7565<br \/>        if i % 100 == 0:<br \/>            # \u8bb0\u5f55\u6210\u672c<br \/>            costs.append(cost)<br \/>            # \u662f\u5426\u6253\u5370\u6210\u672c\u503c<br \/>            if print_cost:<br \/>                print(\"\u7b2c\", i, \"\u6b21\u8fed\u4ee3\uff0c\u6210\u672c\u503c\u4e3a\uff1a\", np.squeeze(cost))<br \/>    # \u8fed\u4ee3\u5b8c\u6210\uff0c\u6839\u636e\u6761\u4ef6\u7ed8\u5236\u56fe<br \/>    if isPlot:<br \/>        plt.plot(np.squeeze(costs))<br \/>        plt.ylabel('cost')<br \/>        plt.xlabel('iterations (per tens)')<br \/>        plt.title(\"Learning rate =\" + str(learning_rate))<br \/>        plt.show()<br \/>    # \u8fd4\u56deparameters<br \/>    return parameters<br \/><br \/><br \/>def predict(X, y, parameters):<br \/>    <em>\"\"\"<br \/><\/em><em>    <\/em><em>\u9884\u6d4b\u7ed3\u679c<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> X:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> y:<br \/><\/em><em>    <\/em><strong><em>:param<\/em><\/strong><em> parameters:<br \/><\/em><em>    <\/em><strong><em>:return<\/em><\/strong><em>:<br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>m = X.shape[1]<br \/>    p = np.zeros((1, m))<br \/><br \/>    # \u6839\u636e\u53c2\u6570\u524d\u5411\u4f20\u64ad<br \/>    probas, caches = L_model_forward(X, parameters)<br \/><br \/>    for i in 
range(0, probas.shape[1]):<br \/>        if probas[0, i] &gt; 0.5:<br \/>            p[0, i] = 1<br \/>        else:<br \/>            p[0, i] = 0<br \/><br \/>    print(\"\u51c6\u786e\u5ea6\u4e3a: \" + str(float(np.sum((p == y)) \/ m)))<br \/>    return p<br \/><br \/><br \/># \u9884\u5904\u7406\u6570\u636e\u96c6<br \/>train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = lr_utils.load_dataset()<br \/><br \/>train_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1).T<br \/>test_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1).T<br \/><br \/># RGB\u6807\u51c6\u5316<br \/>train_x = train_x_flatten \/ 255<br \/>train_y = train_set_y<br \/>test_x = test_x_flatten \/ 255<br \/>test_y = test_set_y<br \/><br \/>n_x = 12288<br \/>n_h = 7<br \/>n_y = 1<br \/>layers_dims = (n_x, n_h, n_y)<br \/><br \/>parameters = two_layer_model(train_x, train_set_y, layers_dims=(n_x, n_h, n_y), num_iterations=2500, print_cost=True,<br \/>                             isPlot=True)<br \/>\"\"\"<br \/>cost\uff1a 0.05336140348560556<br \/>cost\uff1a 0.048554785628770185<br \/>train\u51c6\u786e\u5ea6\u4e3a: 1.0<br \/>test\u51c6\u786e\u5ea6\u4e3a: 0.72<br \/><br \/>cost\u4e3a\uff1a 0.14781357997051983<br \/>cost\uff1a 0.12935258942424563<br \/>train\u51c6\u786e\u5ea6\u4e3a: 1.0<br \/>test\u51c6\u786e\u5ea6\u4e3a: 0.74<br \/>\"\"\"<br \/><br \/>pred_train = predict(train_x, train_y, parameters)<br \/>pred_test = predict(test_x, test_y, parameters)<br \/><br \/><br \/>def print_mislabeled_images(classes, X, y, p):<br \/>    <em>\"\"\"<br \/><\/em><em>   <\/em><em>\u7ed8\u5236\u9884\u6d4b\u548c\u5b9e\u9645\u4e0d\u540c\u7684\u56fe\u50cf\u3002<\/em><em><br \/><\/em><em>       X - <\/em><em>\u6570\u636e\u96c6<\/em><em><br \/><\/em><em>       y - <\/em><em>\u5b9e\u9645\u7684\u6807\u7b7e<\/em><em><br \/><\/em><em>       p - <\/em><em>\u9884\u6d4b<\/em><em><br \/><\/em><em>    \"\"\"<br \/><\/em><em>    <\/em>a = p + y<br \/>    mislabeled_indices = 
np.asarray(np.where(a == 1))<br \/>    plt.rcParams['figure.figsize'] = (40.0, 40.0)  # set default size of plots<br \/>    num_images = len(mislabeled_indices[0])<br \/>    for i in range(num_images):<br \/>        index = mislabeled_indices[1][i]<br \/><br \/>        plt.subplot(2, num_images, i + 1)<br \/>        plt.imshow(X[:, index].reshape(64, 64, 3), interpolation='nearest')<br \/>        plt.axis('off')<br \/>        plt.title(<br \/>            \"Prediction: \" + classes[int(p[0, index])].decode(\"utf-8\") + \" \\n Class: \" + classes[y[0, index]].decode(<br \/>                \"utf-8\"))<br \/>    plt.show()<br \/><br \/><br \/>print_mislabeled_images(classes, test_x, test_y, pred_test)<br \/><\/pre>\n","protected":false},"excerpt":{"rendered":"<p>\u5434\u6069\u8fbe\u6df1\u5ea6\u5b66\u4e60\u7b2c\u4e00\u8bfe\u7b2c\u56db\u5468 \u6df1\u5ea6\u5b66\u4e60\u7f51\u7edc 1.\u4ec0\u4e48\u662f\u6df1\u5c42\u795e\u7ecf\u7f51\u7edc \u5982\u4e0b\u56fe\uff0c\u5206\u522b\u662f\u903b\u8f91\u56de\u5f52\uff0c\u6709\u4e00\u4e2a\u9690\u85cf 
[&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"om_disable_all_campaigns":false,"_mi_skip_tracking":false,"_monsterinsights_sitenote_active":false,"_monsterinsights_sitenote_note":"","_monsterinsights_sitenote_category":0,"footnotes":""},"categories":[7],"tags":[],"views":3962,"_links":{"self":[{"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/posts\/2888"}],"collection":[{"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/comments?post=2888"}],"version-history":[{"count":0,"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/posts\/2888\/revisions"}],"wp:attachment":[{"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/media?parent=2888"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/categories?post=2888"},{"taxonomy":"post_tag","embeddable":true,"href":"http:\/\/www.sniper97.cn\/index.php\/wp-json\/wp\/v2\/tags?post=2888"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}