{"id":693,"date":"2026-03-12T11:53:36","date_gmt":"2026-03-12T03:53:36","guid":{"rendered":"https:\/\/www.vcoco.top\/?p=693"},"modified":"2026-03-12T11:53:36","modified_gmt":"2026-03-12T03:53:36","slug":"%e5%9f%ba%e4%ba%8eflow-matching%e7%9a%84%e6%89%a9%e6%95%a3%e6%a8%a1%e5%9e%8bforward%e3%80%81inverse%e5%8e%9f%e7%90%86%e5%8f%8a%e4%bb%a3%e7%a0%81","status":"publish","type":"post","link":"https:\/\/www.vcoco.top\/index.php\/2026\/03\/12\/%e5%9f%ba%e4%ba%8eflow-matching%e7%9a%84%e6%89%a9%e6%95%a3%e6%a8%a1%e5%9e%8bforward%e3%80%81inverse%e5%8e%9f%e7%90%86%e5%8f%8a%e4%bb%a3%e7%a0%81\/","title":{"rendered":"\u57fa\u4e8eFlow Matching\u7684\u6269\u6563\u6a21\u578bForward\u3001Inverse\u539f\u7406\u53ca\u4ee3\u7801"},"content":{"rendered":"\n<p>Stable Diffusion 3\u53ca3.5\u5f00\u59cb\uff0c\u5305\u62ecflux\u7f16\u8f91\u6a21\u578b\uff0c\u90fd\u4e0d\u518d\u91c7\u7528\u4f20\u7edfDDPM\u7684\u566a\u58f0\u6269\u6563\u6a21\u5f0f\uff0c\u800c\u662f\u91c7\u7528\u6d41\u5339\u914d\u7684\u5f62\u5f0f\u3002Flow Matching\u603b\u7ed3\u662f\uff1a\u7b80\u5355\u6709\u6548\u3002<\/p>\n\n\n\n<ul class=\"wp-block-list\">\n<li>\u8fd9\u7bc7\u6587\u7ae0\u91cc\u90fd\u91c7\u7528sd3.5\u4f5c\u4e3abackbone\u8fdb\u884c\u7814\u7a76\u3002<\/li>\n<\/ul>\n\n\n\n<h1 class=\"wp-block-heading\">\u539f\u7406<\/h1>\n\n\n\n<h3 class=\"wp-block-heading\">Flow Matching \u6838\u5fc3\u673a\u5236 \u2014\u2014 \u4ece\u201c\u9884\u6d4b\u566a\u58f0\u201d\u5230\u201c\u9884\u6d4b\u901f\u5ea6\u573a\u201d<\/h3>\n\n\n\n<p>\u5728 SD 1.5 \u548c SDXL 
\u65f6\u4ee3\uff0c\u6269\u6563\u6a21\u578b\u5efa\u7acb\u5728\u9a6c\u5c14\u53ef\u592b\u94fe\u548c\u968f\u673a\u5fae\u5206\u65b9\u7a0b\uff08SDE\uff09\u4e4b\u4e0a\u3002\u6a21\u578b\u901a\u8fc7\u9884\u6d4b\u201c\u566a\u58f0\uff08Noise\uff09\u201d\u6765\u4e00\u6b65\u6b65\u8270\u96be\u5730\u5c06\u9ad8\u65af\u5206\u5e03\u8fd8\u539f\u4e3a\u771f\u5b9e\u56fe\u50cf\u3002\u8fd9\u79cd\u6a21\u5f0f\u4e0d\u4ec5\u6570\u5b66\u63a8\u5bfc\u6781\u5176\u7e41\u7410\uff0c\u800c\u4e14\u751f\u6210\u8f68\u8ff9\u5f80\u5f80\u662f\u4e00\u6761\u6781\u5176\u66f2\u6298\u7684\u66f2\u7ebf\uff0c\u5bfc\u81f4\u91c7\u6837\u6548\u7387\u4f4e\u4e0b\u3002<\/p>\n\n\n\n<p>\u4ece Stable Diffusion 3\/3.5 \u5230 Flux\uff0c\u884c\u4e1a\u5168\u9762\u8f6c\u5411\u4e86 <strong>Flow Matching\uff08\u6d41\u5339\u914d\uff09<\/strong>\uff0c\u66f4\u5177\u4f53\u7684\u5b9e\u73b0\u5f62\u5f0f\u662f <strong>Rectified Flow\uff08\u6821\u6b63\u6d41\uff09<\/strong>\u3002\u5b83\u7684\u6838\u5fc3\u601d\u60f3\u53ef\u4ee5\u7528\u516d\u4e2a\u5b57\u6982\u62ec\uff1a<strong>\u4e24\u70b9\u4e4b\u95f4\uff0c\u76f4\u7ebf\u6700\u77ed\u3002<\/strong><\/p>\n\n\n\n<h4 class=\"wp-block-heading\">1. 
\u6570\u5b66\u76f4\u89c9\uff1a\u5f3a\u5236\u62c9\u76f4\u7684\u751f\u6210\u8f68\u8ff9<\/h4>\n\n\n\n<p>\u5982\u679c\u6211\u4eec\u6709\u4e00\u4e2a\u7eaf\u566a\u58f0\u5f20\u91cf $x_0 \\sim \\mathcal{N}(0, I)$\uff0c\u548c\u4e00\u4e2a\u76ee\u6807\u771f\u5b9e\u56fe\u50cf $x_1 \\sim P_{data}$\u3002\u4f20\u7edf DDPM \u4f1a\u5728\u4e24\u8005\u4e4b\u95f4\u6784\u5efa\u4e00\u4e2a\u590d\u6742\u7684\u52a0\u566a\/\u53bb\u566a\u968f\u673a\u6f2b\u6b65\u8fc7\u7a0b\u3002<\/p>\n\n\n\n<p>\u800c Flow Matching \u9009\u62e9\u4e86\u4e00\u79cd\u6781\u5176\u66b4\u529b\u7684\u7f8e\u5b66\uff1a\u76f4\u63a5\u5728\u566a\u58f0 $x_0$ \u548c\u56fe\u50cf $x_1$ \u4e4b\u95f4\u8fde\u4e00\u6761\u76f4\u7ebf\u3002<\/p>\n\n\n\n<p>\u6211\u4eec\u53ef\u4ee5\u5b9a\u4e49\u4e00\u4e2a\u6700\u7b80\u5355\u7684\u7ebf\u6027\u63d2\u503c\u8def\u5f84 $x_t$\uff1a<\/p>\n\n\n\n<p>$$x_t = (1-t)x_0 + t x_1$$<\/p>\n\n\n\n<p><em>(\u6ce8\uff1a\u6b64\u5904 $t$ \u4ece 0 \u8d70\u5411 1\uff0c\u4ee3\u8868\u4ece\u566a\u58f0\u8d70\u5411\u56fe\u50cf\u3002\u5728\u5177\u4f53\u7684\u4ee3\u7801\u5b9e\u73b0\u5982 diffusers \u4e2d\uff0c$t$ \u901a\u5e38\u5bf9\u5e94\u65f6\u95f4\u6b65\u6216\u566a\u58f0\u6bd4\u4f8b $\\sigma$ \u7684\u8870\u51cf)<\/em>\u3002<\/p>\n\n\n\n<h4 class=\"wp-block-heading\">2. 
\u6a21\u578b\u5728\u5b66\u4ec0\u4e48\uff1f\u2014\u2014 \u9884\u6d4b\u901f\u5ea6 (Velocity)<\/h4>\n\n\n\n<p>\u65e2\u7136\u8f68\u8ff9\u662f\u9884\u5148\u5b9a\u4e49\u597d\u7684\u76f4\u7ebf\uff0c\u6211\u4eec\u5bf9\u4e0a\u9762\u7684\u8def\u5f84\u516c\u5f0f\u6c42\u65f6\u95f4 $t$ \u7684\u5bfc\u6570\uff0c\u5c31\u80fd\u5f97\u5230\u4e00\u4e2a\u6052\u5b9a\u7684<strong>\u901f\u5ea6\u573a (Vector Field)<\/strong>\uff1a<\/p>\n\n\n\n<p>$$\\frac{dx_t}{dt} = x_1 &#8211; x_0$$<\/p>\n\n\n\n<p>\u5728 Flow Matching \u67b6\u6784\u4e0b\uff0c\u4ee5 SD3.5 \u7684 DiT (Diffusion Transformer) \u4e3a\u4f8b\uff0c<strong>\u6a21\u578b\u4e0d\u518d\u9884\u6d4b\u56fe\u50cf\u4e0a\u7684\u566a\u58f0\u6b8b\u5dee\uff0c\u800c\u662f\u76f4\u63a5\u9884\u6d4b\u8fd9\u4e2a\u6d41\u5411\u76ee\u6807\u56fe\u50cf\u7684\u901f\u5ea6\u5411\u91cf $v_\\theta(x_t, t)$<\/strong>\u3002<\/p>\n\n\n\n<p>\u6362\u53e5\u8bdd\u8bf4\uff0c\u6a21\u578b\u5728\u5b66\u4e60\uff1a\u5f53\u4f60\u7ad9\u5728\u6f5c\u5728\u7a7a\u95f4\uff08Latent Space\uff09\u7684\u4efb\u610f\u4e00\u4e2a\u70b9 $x_t$ \u65f6\uff0c\u98ce\u662f\u5f80\u54ea\u4e2a\u65b9\u5411\u5439\u7684\uff0c\u98ce\u901f\u6709\u591a\u5927\u3002<\/p>\n\n\n\n<h4 class=\"wp-block-heading\">3. 
\u91c7\u6837\u8fc7\u7a0b\uff1a\u6781\u7b80\u7684\u5e38\u5fae\u5206\u65b9\u7a0b (ODE) \u6c42\u89e3<\/h4>\n\n\n\n<p>\u7531\u4e8e\u653e\u5f03\u4e86\u590d\u6742\u7684\u968f\u673a\u5fae\u5206\u65b9\u7a0b\uff0c\u6d41\u5339\u914d\u7684\u751f\u6210\u8fc7\u7a0b\u9000\u5316\u4e3a\u4e86\u6c42\u89e3\u4e00\u4e2a\u7b80\u5355\u7684\u5e38\u5fae\u5206\u65b9\u7a0b\uff08ODE\uff09\u3002\u8fd9\u4f7f\u5f97\u6211\u4eec\u53ef\u4ee5\u4f7f\u7528\u6781\u5176\u8f7b\u91cf\u7ea7\u7684\u6570\u503c\u6c42\u89e3\u5668\uff08\u5982 Euler \u6cd5\uff09\u3002<\/p>\n\n\n\n<p>\u5728\u4ee3\u7801\u5c42\u9762\uff0c\u524d\u5411\u91c7\u6837\u7684\u6838\u5fc3\u903b\u8f91\u88ab\u7b80\u5316\u4e3a\u6781\u5176\u4f18\u96c5\u7684\u4e00\u884c\u6b27\u62c9\u6b65\u8fdb\uff1a<\/p>\n\n\n\n<p>$$x_{t+dt} = x_t + v_\\theta(x_t, t) \\cdot dt$$<\/p>\n\n\n\n<p>\u56e0\u4e3a Rectified Flow \u5728\u8bad\u7ec3\u65f6\u5f3a\u8feb\u8f68\u8ff9\u5c3d\u53ef\u80fd\u7b14\u76f4\uff08\u66f2\u7387\u6781\u4f4e\uff09\uff0c\u8fd9\u79cd\u6700\u57fa\u7840\u7684\u4e00\u9636 Euler \u6c42\u89e3\u5668\u5c31\u80fd\u5728\u975e\u5e38\u5c11\u7684\u6b65\u6570\uff08\u5982 20-28 \u6b65\uff09\u5185\u5b8c\u7f8e\u6536\u655b\uff0c\u76f4\u63a5\u8d70\u5230\u7ec8\u70b9\u751f\u6210\u9ad8\u6e05\u56fe\u50cf\u3002<\/p>\n\n\n\n<h4 class=\"wp-block-heading\">4. 
CFG (\u65e0\u5206\u7c7b\u5668\u5f15\u5bfc) \u7684\u51e0\u4f55\u610f\u4e49<\/h4>\n\n\n\n<p>\u5728 Flow Matching \u4e2d\uff0c\u5f15\u5165 Prompt \u63a7\u5236\u7684 Classifier-Free Guidance (CFG) \u4e0d\u518d\u662f\u5bf9\u566a\u58f0\u7684\u52a0\u51cf\uff0c\u800c\u662f<strong>\u901f\u5ea6\u5411\u91cf\u7684\u5916\u63a8<\/strong>\u3002<\/p>\n\n\n\n<p>\u6a21\u578b\u4f1a\u540c\u65f6\u9884\u6d4b\u65e0\u6761\u4ef6\u98ce\u5411 $v_{uncond}$ \u548c\u6709\u6761\u4ef6\u98ce\u5411 $v_{text}$\uff0c\u6700\u7ec8\u7684\u5f15\u5bfc\u901f\u5ea6\u4e3a\uff1a<\/p>\n\n\n\n<p>$$v_{guided} = v_{uncond} + \\omega \\cdot (v_{text} &#8211; v_{uncond})$$<\/p>\n\n\n\n<p>\u5176\u4e2d $\\omega$ \u5373\u4e3a <code>guidance_scale<\/code>\u3002\u8fd9\u76f8\u5f53\u4e8e\u5728\u591a\u7ef4\u7a7a\u95f4\u4e2d\uff0c\u5f3a\u884c\u5c06\u901f\u5ea6\u5411\u91cf\u671d\u7740\u63d0\u793a\u8bcd\u6307\u5f15\u7684\u65b9\u5411\u505a\u5ef6\u957f\u548c\u626d\u66f2\uff0c\u4ece\u800c\u8ba9\u6700\u7ec8\u7684\u843d\u70b9\uff08\u751f\u6210\u7684\u56fe\u50cf\uff09\u66f4\u52a0\u8d34\u5408\u6587\u672c\u8bed\u4e49\u3002<\/p>\n\n\n\n<h3 class=\"wp-block-heading\">Inverse\u8fc7\u7a0b\uff1a\u7406\u8bba\u4e0a\u7684\u65e0\u635f\u53cd\u6f14<\/h3>\n\n\n\n<h4 class=\"wp-block-heading\">1. 
ODE \u5e26\u6765\u7684\u7edd\u5bf9\u786e\u5b9a\u6027 (Deterministic Trajectory) <\/h4>\n\n\n\n<p>\u5728\u4f20\u7edf DDPM \u7684 SDE (\u968f\u673a\u5fae\u5206\u65b9\u7a0b) \u6846\u67b6\u4e0b\uff0c\u6b63\u5411\u52a0\u566a\u548c\u9006\u5411\u53bb\u566a\u90fd\u4f34\u968f\u7740\u968f\u673a\u91c7\u6837\u3002\u8fd9\u5c31\u597d\u6bd4\u5728\u66b4\u98ce\u96ea\u4e2d\u5bfb\u8def\uff0c\u5373\u4fbf\u4f60\u60f3\u539f\u8def\u8fd4\u56de\uff0c\u8d70\u8fc7\u7684\u811a\u5370\u4e5f\u5df2\u7ecf\u88ab\u968f\u673a\u7684\u65b0\u96ea\u8986\u76d6\uff0c\u65e0\u6cd5\u7cbe\u786e\u6eaf\u6e90\u3002\u4f46 Flow Matching \u91c7\u7528\u7684\u662f\u5e38\u5fae\u5206\u65b9\u7a0b (ODE)\u3002ODE \u7684\u6700\u5927\u9b45\u529b\u5728\u4e8e\u5176<strong>\u7edd\u5bf9\u7684\u786e\u5b9a\u6027<\/strong>\u2014\u2014\u53ea\u8981\u7ed9\u5b9a\u4e86\u521d\u59cb\u6761\u4ef6\u548c\u901f\u5ea6\u573a\uff0c\u7c92\u5b50\u7684\u8fd0\u52a8\u8f68\u8ff9\u662f\u552f\u4e00\u4e14\u53ef\u9006\u7684\u3002\u7406\u8bba\u4e0a\uff0c\u6b63\u5411\u751f\u6210\u662f\u987a\u7740\u65f6\u95f4\u6b65\u524d\u8fdb\uff1a$x_{t+dt} = x_t + v_\\theta(x_t, t) \\cdot dt$\uff0c\u90a3\u4e48\u9006\u5411\u53cd\u6f14\u53ea\u9700\u8981\u628a\u65f6\u95f4\u6b65\u548c\u901f\u5ea6\u573a\u53cd\u8f6c\uff0c\u5c31\u80fd\u4e25\u4e1d\u5408\u7f1d\u5730\u9000\u56de\u539f\u59cb\u566a\u58f0\u3002\u8fd9\u79cd\u65e0\u635f\u53cd\u6f14\u7684\u7279\u6027\u5728\u7cbe\u786e\u56fe\u50cf\u7f16\u8f91\u3001<strong>\u9690\u5f62\u6c34\u5370\u7684\u5d4c\u5165\u4e0e\u63d0\u53d6<\/strong>\u7b49\u8981\u6c42\u6781\u9ad8\u4fdd\u771f\u5ea6\u7684\u5e95\u5c42\u89c6\u89c9\u4efb\u52a1\u4e2d\uff0c\u662f\u4e0d\u53ef\u6216\u7f3a\u7684\u57fa\u77f3\u3002<\/p>\n\n\n\n<h4 class=\"wp-block-heading\">2. 
\u6b27\u62c9\u6c42\u89e3\u5668\u7684\u79bb\u6563\u5316\u622a\u65ad\u8bef\u5dee (Discretization Error)<\/h4>\n\n\n\n<p>\u7406\u8bba\u867d\u7136\u5b8c\u7f8e\uff0c\u4f46\u5b9e\u9645\u4ee3\u7801\u4e2d\u4f7f\u7528\u7684 Euler\uff08\u6b27\u62c9\uff09\u4e00\u9636\u6b65\u8fdb\u6cd5\u6253\u7834\u4e86\u65e0\u635f\u7684\u795e\u8bdd\u3002\u5728\u79bb\u6563\u7684\u8ba1\u7b97\u4e2d\uff0c\u65f6\u95f4\u6b65 $dt$ \u662f\u6709\u9650\u5927\u5c0f\u7684\u3002\u5f53\u6a21\u578b\u5728\u6b63\u5411\u8d77\u70b9 $A$ \u8ba1\u7b97\u51fa\u5207\u7ebf\u901f\u5ea6 $v_A$ \u5e76\u76f4\u884c\u8d70\u5230 $B$ \u70b9\u65f6\uff0c\u7531\u4e8e\u771f\u5b9e\u7684\u6d41\u5339\u914d\u8f68\u8ff9\u5e26\u6709\u66f2\u7387\uff0c$B$ \u70b9\u7684\u5207\u7ebf\u901f\u5ea6 $v_B$ \u4e0e $v_A$ \u5e76\u4e0d\u76f8\u7b49\u3002\u56e0\u6b64\uff0c\u5f53\u6211\u4eec\u8bd5\u56fe\u4ece\u7ec8\u70b9 $B$ \u53cd\u63a8\u65f6\uff0c\u6a21\u578b\u53ea\u80fd\u57fa\u4e8e\u5f53\u524d\u7684 $v_B$ \u7684\u5207\u7ebf\u901f\u5ea6\u7ed9\u6307\u8def\uff1a$A&#8217; = B &#8211; v_B \\cdot dt$\u3002\u7531\u4e8e $v_A \\neq v_B$\uff0c\u53cd\u63a8\u51fa\u6765\u7684\u8d77\u70b9 $A&#8217;$ \u5fc5\u7136\u504f\u79bb\u771f\u6b63\u7684 $A$\u3002\u8fd9\u79cd\u5c40\u90e8\u622a\u65ad\u8bef\u5dee\u5728\u591a\u6b21\u5faa\u73af\u4e2d\u4e0d\u65ad\u7d2f\u52a0\uff0c\u5bfc\u81f4\u5355\u7eaf\u4f9d\u9760\u98a0\u5012\u5faa\u73af\u7684\u6734\u7d20\u53cd\u6f14\uff08Naive Inversion\uff09\u65e0\u6cd5\u505a\u5230\u771f\u6b63\u610f\u4e49\u4e0a\u7684 $100\\%$ \u65e0\u635f\u3002\u4f46\u662f\u589e\u52a0\u6b65\u6570\uff08step\uff09\u4f1a\u6709\u6781\u5927\u7684\u63d0\u5347\u3002\u56e0\u4e3astep\u8d8a\u591a\uff0c\u95f4\u9694\u8d8a\u5c0f\uff0c\u4e00\u6b21step\u8d70\u7684\u8def\u5f84\u504f\u5dee\u4e5f\u5c31\u5c0f\u3002<\/p>\n\n\n\n<h4 class=\"wp-block-heading\">3. 
CFG (\u65e0\u5206\u7c7b\u5668\u5f15\u5bfc) \u5e26\u6765\u7684\u8f68\u8ff9\u626d\u66f2\u3002<\/h4>\n\n\n\n<p>\u5982\u679c\u8bf4\u6b27\u62c9\u622a\u65ad\u8bef\u5dee\u662f\u539f\u7f6a\uff0c\u90a3\u4e48 CFG \u5c31\u662f\u5c06\u8bef\u5dee\u6025\u5267\u653e\u5927\u7684\u6760\u6746\u3002\u8981\u6cbf\u7740\u539f\u8def\u9000\u56de\uff0c\u6570\u5b66\u4e0a\u8981\u6c42\u9006\u5411\u53cd\u63a8\u5fc5\u987b\u4f7f\u7528\u4e0e\u6b63\u5411\u751f\u6210<strong>\u5b8c\u5168\u4e00\u81f4<\/strong>\u7684 CFG Scale\u3002\u7136\u800c\uff0c\u9ad8 CFG \u4f1a\u6781\u5927\u5730\u52a0\u5267\u901f\u5ea6\u573a\u5411\u91cf\u7684\u975e\u7ebf\u6027\u7a0b\u5ea6\uff0c\u628a\u539f\u672c\u5e73\u7f13\u7684\u201c\u5fae\u5f2f\u76f4\u9053\u201d\u5f3a\u884c\u626d\u66f2\u6210\u201c\u6025\u8f6c\u5f2f\u201d\u3002\u8fd9\u79cd\u9ad8\u66f2\u7387\u4f7f\u5f97\u524d\u6587\u63d0\u5230\u7684\u622a\u65ad\u8bef\u5dee\u88ab\u6307\u6570\u7ea7\u653e\u5927\u3002\u5982\u679c\u4e3a\u4e86\u964d\u4f4e\u66f2\u7387\u3001\u5e73\u6ed1\u8f68\u8ff9\u800c\u5728\u53cd\u63a8\u65f6\u5173\u95ed CFG\uff08\u8bbe\u4e3a 1.0\uff09\uff0c\u5219\u76f8\u5f53\u4e8e\u76f4\u63a5\u6362\u4e86\u4e00\u5957\u5b8c\u5168\u4e0d\u540c\u7684\u901f\u5ea6\u573a\uff0c\u4ece\u6839\u672c\u4e0a\u8fdd\u80cc\u4e86\u539f\u8def\u5f84\uff0c\u5bfc\u81f4\u7b97\u51fa\u7684\u566a\u58f0\u5b8c\u5168\u662f\u53e6\u4e00\u56de\u4e8b\u3002<\/p>\n\n\n\n<h4 class=\"wp-block-heading\">4. 
VAE \u7f16\u89e3\u7801\u4e0e 8-bit \u91cf\u5316\u7684\u201c\u4fe1\u606f\u9ed1\u6d1e\u201d<\/h4>\n\n\n\n<p>\u629b\u5f00\u7eaf Latent \u7a7a\u95f4\u7684\u6570\u5b66\u535a\u5f08\uff0c\u5728\u5de5\u7a0b\u843d\u5730\u65f6\u8fd8\u6a2a\u4e98\u7740\u65e0\u6cd5\u903e\u8d8a\u7684\u7269\u7406\u5c4f\u969c\u3002\u7b2c\u4e00\u5c42\u662f VAE \u81ea\u8eab\u7684\u975e\u5b8c\u7f8e\u5bf9\u79f0\u6027\u4f5c\u4e3a\u6709\u635f\u81ea\u7f16\u7801\u5668\uff0c$Encode(Decode(z))$ \u6c38\u8fdc\u5b58\u5728\u5fae\u5c0f\u7578\u53d8\u3002\u7b2c\u4e8c\u5c42\u5219\u662f\u81f4\u547d\u7684\u56fe\u50cf\u683c\u5f0f\u622a\u65ad\uff1a\u5c06 Float16 \u7cbe\u5ea6\u7684\u9ad8\u7ef4\u5f20\u91cf\u89e3\u7801\uff0c\u5e76\u5f3a\u5236 Clamp \u622a\u65ad\u540e\u4fdd\u5b58\u4e3a 0-255 \u7684 8-bit RGB \u56fe\u50cf\uff08\u5982 PNG\uff09\u65f6\uff0c\u5c0f\u6570\u70b9\u540e\u7684\u6d77\u91cf\u5fae\u89c2\u7279\u5f81\u77ac\u95f4\u6e6e\u706d\u3002\u5f53\u8fd9\u5f20\u56fe\u50cf\u88ab\u91cd\u65b0\u8bfb\u53d6\u5e76 Encode \u56de\u53bb\u5bfb\u627e\u8d77\u70b9\u65f6\uff0c\u5750\u6807\u65e9\u5df2\u53d1\u751f\u4e86\u6570\u5341 dB \u7684\u707e\u96be\u6027\u504f\u79fb\u3002\u57fa\u4e8e\u8fd9\u6837\u4e00\u4e2a\u5343\u75ae\u767e\u5b54\u7684\u8d77\u70b9\u53bb\u9006\u63a8 ODE\uff0c\u65e0\u5f02\u4e8e\u523b\u821f\u6c42\u5251\u3002<\/p>\n\n\n\n<h1 class=\"wp-block-heading\">\u4ee3\u7801<\/h1>\n\n\n\n<h2 class=\"wp-block-heading\">SD3.5-medium<\/h2>\n\n\n\n<h4 class=\"wp-block-heading\">Forward\uff08\u4e0d\u7528Diffusers\uff09<\/h4>\n\n\n\n<pre class=\"wp-block-code\"><code>from tqdm import tqdm\nimport torch\nimport torch.nn.functional as F\nfrom diffusers import (\n    SD3Transformer2DModel,\n    FlowMatchEulerDiscreteScheduler,\n    AutoencoderKL\n)\n\nfrom transformers import (\n    CLIPTokenizer,\n    CLIPTextModelWithProjection,\n    T5TokenizerFast,\n    T5EncoderModel\n)\nfrom utils import calculate_latent_metrics\n\nfrom PIL import Image\nimport numpy as np\n\ndevice = \"cuda\"\ndtype = torch.float16\nmodel_id = 
\"stabilityai\/stable-diffusion-3.5-medium\"\n\nprompt = \"Astronaut in a jungle, cold color palette, muted colors, detailed, 8k\"\nnegative_prompt = \"\" # \u5f15\u5165\u8d1f\u5411 prompt\uff08\u7a7a\u5b57\u7b26\u4e32\uff09\nguidance_scale = 4.5 # SD3.5 Medium \u9ed8\u8ba4\u7684 CFG scale\n\nsteps = 28\n\n\ndef init_models():\n    scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(\n        model_id,\n        subfolder=\"scheduler\"\n    )\n\n    transformer = SD3Transformer2DModel.from_pretrained(\n        model_id,\n        subfolder=\"transformer\",\n        torch_dtype=dtype\n    ).to(device)\n\n    vae = AutoencoderKL.from_pretrained(\n        model_id,\n        subfolder=\"vae\",\n        torch_dtype=dtype\n    ).to(device)\n\n    tokenizer = CLIPTokenizer.from_pretrained(model_id, subfolder=\"tokenizer\")\n    tokenizer_2 = CLIPTokenizer.from_pretrained(model_id, subfolder=\"tokenizer_2\")\n    tokenizer_3 = T5TokenizerFast.from_pretrained(model_id, subfolder=\"tokenizer_3\")\n\n    text_encoder = CLIPTextModelWithProjection.from_pretrained(\n        model_id, subfolder=\"text_encoder\", torch_dtype=dtype\n    ).to(device)\n\n    text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(\n        model_id, subfolder=\"text_encoder_2\", torch_dtype=dtype\n    ).to(device)\n\n    text_encoder_3 = T5EncoderModel.from_pretrained(\n        model_id, subfolder=\"text_encoder_3\", torch_dtype=dtype\n    ).to(device)\n\n    return scheduler, transformer, vae, tokenizer, tokenizer_2, tokenizer_3, text_encoder, text_encoder_2, text_encoder_3\n\nscheduler, transformer, vae, tokenizer, tokenizer_2, tokenizer_3, text_encoder, text_encoder_2, text_encoder_3 = init_models()\n\n\n\n# ==========================================\n# 1. 
\u6587\u672c\u7f16\u7801 (\u6b63\u5411 + \u8d1f\u5411)\n# ==========================================\n\ndef encode_prompt(text):\n    # Tokenize\n    t1 = tokenizer(text, padding=\"max_length\", max_length=77, truncation=True, return_tensors=\"pt\")\n    t2 = tokenizer_2(text, padding=\"max_length\", max_length=77, truncation=True, return_tensors=\"pt\")\n    t3 = tokenizer_3(text, padding=\"max_length\", max_length=256, truncation=True, return_tensors=\"pt\")\n\n    # Encode\n    with torch.no_grad():\n        out1 = text_encoder(t1.input_ids.to(device), output_hidden_states=True)\n        out2 = text_encoder_2(t2.input_ids.to(device), output_hidden_states=True)\n        out3 = text_encoder_3(t3.input_ids.to(device))\n\n        # CLIP \u53d6\u5012\u6570\u7b2c\u4e8c\u5c42\uff0cT5 \u53d6\u6700\u540e\u4e00\u5c42\n        emb1 = out1.hidden_states&#91;-2]\n        emb2 = out2.hidden_states&#91;-2]\n        emb3 = out3.last_hidden_state\n\n        pooled1 = out1.text_embeds\n        pooled2 = out2.text_embeds\n\n    # \u7ef4\u5ea6\u5bf9\u9f50\u4e0e\u62fc\u63a5\n    clip_embeds = torch.cat(&#91;emb1, emb2], dim=-1)           # (1, 77, 2048)\n    clip_embeds = F.pad(clip_embeds, (0, 4096 - 2048))      # (1, 77, 4096)\n    \n    text_embeddings = torch.cat(&#91;clip_embeds, emb3], dim=1) # (1, 333, 4096)\n    pooled_embeds = torch.cat(&#91;pooled1, pooled2], dim=-1)   # (1, 2048)\n    \n    return text_embeddings, pooled_embeds\n\n# \u5206\u522b\u83b7\u53d6\u6b63\u5411\u548c\u65e0\u6761\u4ef6(\u8d1f\u5411)\u7684 Embeddings\npos_text_emb, pos_pooled_emb = encode_prompt(prompt)\nneg_text_emb, neg_pooled_emb = encode_prompt(negative_prompt)\n\n# \u5728 Batch \u7ef4\u5ea6\u62fc\u63a5 (Uncond \u5728\u524d\uff0cCond \u5728\u540e\uff0c\u8fd9\u662f diffusers \u7684\u4e60\u60ef)\n# shape: (2, 333, 4096) \u548c (2, 2048)\nbatched_text_embeddings = torch.cat(&#91;neg_text_emb, pos_text_emb], dim=0)\nbatched_pooled_embeds = torch.cat(&#91;neg_pooled_emb, pos_pooled_emb], 
dim=0)\n\n\n# ==========================================\n# 2. Latent \u521d\u59cb\u5316\u4e0e Scheduler \u8bbe\u7f6e\n# ==========================================\n\nori_latents = torch.randn((1, 16, 128, 128), device=device, dtype=dtype)\nlatents = ori_latents.clone()\nscheduler.set_timesteps(steps)\ntimesteps = scheduler.timesteps\nsigmas = scheduler.sigmas\n\nprint(f\"len(scheduler.timesteps) = {len(timesteps)}\")  # \u8f93\u51fa 28 (= steps)\nprint(f\"len(scheduler.sigmas) = {len(sigmas)}\")        # \u8f93\u51fa 29 (= steps + 1)\n# ==========================================\n# 3. Flow Matching \u91c7\u6837\u5faa\u73af (\u5e26 CFG)\n# ==========================================\nprint(\"---Forward Sampling... ---\")\nfor i, t in tqdm(enumerate(timesteps)):\n    \n    # \u4e3a\u4e86 CFG\uff0c\u5c06 latents \u590d\u5236\u4e00\u4efd\u4ee5\u5339\u914d batch size = 2\n    latent_model_input = torch.cat(&#91;latents] * 2, dim=0)\n    \n    timestep = torch.full((latent_model_input.shape&#91;0],), int(t), device=device, dtype=torch.long)\n\n    with torch.no_grad():\n        velocity_pred = transformer(\n            hidden_states=latent_model_input,\n            timestep=timestep,\n            encoder_hidden_states=batched_text_embeddings,\n            pooled_projections=batched_pooled_embeds,\n            return_dict=False\n        )&#91;0]\n\n    # \u6267\u884c Classifier-Free Guidance\n    velocity_uncond, velocity_text = velocity_pred.chunk(2)\n    velocity_pred = velocity_uncond + guidance_scale * (velocity_text - velocity_uncond)\n\n    # Euler \u66f4\u65b0\u6b65\u8fdb\n    sigma = sigmas&#91;i]\n    sigma_next = sigmas&#91;i + 1]\n    dt = sigma_next - sigma\n    \n    latents = latents + velocity_pred * dt\n\n# ==========================================\n# 4. 
VAE Decode (shift_factor)\n# ==========================================\n\nlatents_for_vae = (latents \/ vae.config.scaling_factor) + vae.config.shift_factor\nwith torch.no_grad():\n    image = vae.decode(latents_for_vae).sample\n\nimage = (image \/ 2 + 0.5).clamp(0, 1)\nimage = image.cpu().permute(0, 2, 3, 1).numpy()&#91;0]\n\nimage = Image.fromarray((image * 255).astype(np.uint8))\nimage.save(\"output\/sd3_forward_with_cfg.png\")\n\n<\/code><\/pre>\n\n\n\n<h4 class=\"wp-block-heading\">\u76f4\u63a5\u4eceLatent Inverse<\/h4>\n\n\n\n<pre class=\"wp-block-code\"><code># ==========================================\n# \u9ad8\u7cbe\u5ea6\u53cd\u6f14\u6d4b\u8bd5 (\u63a5\u5728\u6b63\u5411\u751f\u6210\u5faa\u73af\u7ed3\u675f\u540e\uff0cVAE Decode \u4e4b\u524d)\n# ==========================================\n\nprint(\"\\n--- \u5f00\u59cb\u7eaf Latent \u95ed\u73af\u53cd\u6f14\u6d4b\u8bd5 ---\")\n# 1. \u6700\u521d\u7684\u566a\u58f0ori_latents\n# 2. \u76f4\u63a5\u83b7\u53d6\u6b63\u5411\u751f\u6210\u7ed3\u675f\u65f6\u7684 Latent (\u4e0d\u7ecf\u8fc7 VAE)\nlatents_inv = latents.clone()\n\n# \u91cd\u7f6e\u4e3a\u66f4\u9ad8\u7684timestep\uff0c\u4ee5\u786e\u4fdd\u8db3\u591f\u7684\u53cd\u6f14\u6b65\u9aa4\nscheduler.set_timesteps(steps)\n\n# 3. 
\u7ffb\u8f6c\u65f6\u95f4\u6b65\u548c sigma\ntimesteps_rev = scheduler.timesteps.flip(0)\nsigmas_rev = scheduler.sigmas.flip(0)\n\n# \u3010\u5173\u952e\u4fee\u6539\u3011\uff1a\u5fc5\u987b\u4f7f\u7528\u5b8c\u5168\u76f8\u540c\u7684 CFG \u624d\u80fd\u539f\u8def\u8fd4\u56de\ninv_guidance_scale = 4.5 \n\nfor i, t in tqdm(enumerate(timesteps_rev)):\n    \n    latent_model_input = torch.cat(&#91;latents_inv] * 2, dim=0)\n    timestep = torch.full((latent_model_input.shape&#91;0],), int(t), device=device, dtype=torch.long)\n\n    with torch.no_grad():\n        velocity_pred = transformer(\n            hidden_states=latent_model_input,\n            timestep=timestep,\n            encoder_hidden_states=batched_text_embeddings, \n            pooled_projections=batched_pooled_embeds,\n            return_dict=False\n        )&#91;0]\n\n    velocity_uncond, velocity_text = velocity_pred.chunk(2)\n    \n    # \u4f7f\u7528\u548c\u6b63\u5411\u4e00\u81f4\u7684 CFG\n    velocity_pred = velocity_uncond + inv_guidance_scale * (velocity_text - velocity_uncond)\n\n    sigma = sigmas_rev&#91;i]          \n    sigma_next = sigmas_rev&#91;i + 1] \n    dt = sigma_next - sigma        # \u6b63\u5411\u53cd\u63a8\uff0cdt \u4e3a\u6b63\u6570\n    \n    latents_inv = latents_inv + velocity_pred * dt\n\n# \u8ba1\u7b97\u6307\u6807\nprint(\"\\n--- \u95ed\u73af\u53cd\u6f14\u76f8\u4f3c\u5ea6\u7ed3\u679c ---\")\n# \u8c03\u7528\u4f60\u4e4b\u524d\u7684 calculate_latent_metrics \u51fd\u6570\nmse, cos_sim, psnr = calculate_latent_metrics(ori_latents, latents_inv)\nprint(f\"PSNR: {psnr:.2f} dB, MSE: {mse:.6f}, Cosine Similarity: {cos_sim:.6f}\")\n<\/code><\/pre>\n\n\n\n<h4 class=\"wp-block-heading\">\u4ece\u56fe\u7247Inverse<\/h4>\n\n\n\n<pre class=\"wp-block-code\"><code>\n'''\nInverse\n'''\nfrom torchvision import transforms\n\nprint(\"\\n--- \u5f00\u59cb\u9006\u5411\u53cd\u6f14 (Inversion) ---\")\n\ninit_image = Image.open(\"output\/sd3_forward_with_cfg.png\").convert(\"RGB\")\nimage_tensor = 
transforms.ToTensor()(init_image).unsqueeze(0).to(device, dtype)\nimage_tensor = image_tensor * 2.0 - 1.0 \n\nwith torch.no_grad():\n    latents_inv = vae.encode(image_tensor).latent_dist.mean \n\nlatents_inv = (latents_inv - vae.config.shift_factor) * vae.config.scaling_factor\n\nprint(\"\\n--- \u7eaf VAE + PNG \u538b\u7f29\u5e26\u6765\u7684\u8bef\u5dee ---\")\nmse, cos_sim, psnr = calculate_latent_metrics(latents_inv, latents_for_vae)\nprint(f\"PSNR: {psnr:.2f} dB, MSE: {mse:.6f}, Cosine Similarity: {cos_sim:.6f}\")\n\nscheduler.set_timesteps(steps)\n\ntimesteps_rev = scheduler.timesteps.flip(0)\nsigmas_rev = scheduler.sigmas.flip(0)\n\ninv_guidance_scale = 4.5\n\n\nfor i, t in tqdm(enumerate(timesteps_rev)):\n    \n    latent_model_input = torch.cat(&#91;latents_inv] * 2, dim=0)\n    timestep = torch.full((latent_model_input.shape&#91;0],), int(t), device=device, dtype=torch.long)\n\n    with torch.no_grad():\n        velocity_pred = transformer(\n            hidden_states=latent_model_input,\n            timestep=timestep,\n            encoder_hidden_states=batched_text_embeddings, \n            pooled_projections=batched_pooled_embeds,\n            return_dict=False\n        )&#91;0]\n\n    velocity_uncond, velocity_text = velocity_pred.chunk(2)\n    velocity_pred = velocity_uncond + inv_guidance_scale * (velocity_text - velocity_uncond)\n\n    sigma = sigmas_rev&#91;i]          \n    sigma_next = sigmas_rev&#91;i + 1] \n    \n    dt = sigma_next - sigma\n    \n    latents_inv = latents_inv + velocity_pred * dt\n\nprint(\"\u9006\u5411\u5b8c\u6210\uff01\u83b7\u5f97\u7684\u521d\u59cb\u566a\u58f0 shape:\", latents_inv.shape)\nmse, cos_sim, psnr = calculate_latent_metrics(ori_latents, latents_inv)\nprint(f\"PSNR: {psnr:.2f} dB, MSE: {mse:.6f}, Cosine Similarity: {cos_sim:.6f}\")<\/code><\/pre>\n","protected":false},"excerpt":{"rendered":"<p>Stable Diffusion 
3\u53ca3.5\u5f00\u59cb\uff0c\u5305\u62ecflux\u7f16\u8f91\u6a21\u578b\uff0c\u90fd\u4e0d\u518d\u91c7\u7528\u4f20\u7edfDDPM\u7684\u566a\u58f0\u6269\u6563\u6a21\u5f0f\uff0c [&hellip;]<\/p>\n","protected":false},"author":1,"featured_media":0,"comment_status":"open","ping_status":"open","sticky":false,"template":"","format":"standard","meta":{"footnotes":""},"categories":[84],"tags":[110,111,109],"class_list":["post-693","post","type-post","status-publish","format-standard","hentry","category-computer-vision","tag-ddpm","tag-flow-matching","tag-stable-diffusion"],"_links":{"self":[{"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/posts\/693","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/comments?post=693"}],"version-history":[{"count":7,"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/posts\/693\/revisions"}],"predecessor-version":[{"id":700,"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/posts\/693\/revisions\/700"}],"wp:attachment":[{"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/media?parent=693"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/categories?post=693"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.vcoco.top\/index.php\/wp-json\/wp\/v2\/tags?post=693"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}