#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Created on Wed Oct 23 11:45:49 2019
@author: wang

PyTorch tutorial script: Variable basics and common loss functions.
"""
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F

# Variable
# - Definition: the container PyTorch uses to hold trainable data.
# - Composition: a Variable wraps a trainable data Tensor plus its
#   gradient Tensor.
# - Covered below: (1) creating a Variable, (2) computing gradients.
# 1. Creating a Variable
# torch.Tensor(2, 2) allocates an *uninitialized* 2x2 tensor; the printed
# values are whatever happened to be in memory.
x = Variable(torch.Tensor(2, 2))
print("x variable: ", x)  # printing a Variable shows x.data
print("x.data: ", x.data, ",x.grad: ", x.grad)

# 2. Computing gradients
x = Variable(torch.Tensor([[1, 2], [3, 4]]), requires_grad=True)
v_out = torch.mean(x * x)  # scalar output: mean(x^2)
print(x.grad)  # None: no gradient before backward() has run
v_out.backward(retain_graph=True)  # retain_graph allows calling backward again
print(x.grad)  # d(mean(x^2))/dx = x/2
'''tensor([[0.5000, 1.0000],
[1.5000, 2.0000]])'''
v_out.backward(retain_graph=True)  # gradients ACCUMULATE on repeated backward
print(x.grad)  # now 2 * (x/2) = x, because the second pass added on top
'''tensor([[1., 2.],
[3., 4.]])'''
x.grad = None  # manually release / clear the accumulated gradient
v_out.backward()  # after clearing, the gradient is x/2 again
# Loss functions
# 1. BCELoss: binary cross-entropy; expects inputs already in (0, 1).
m = nn.Sigmoid()  # squashes raw logits into (0, 1)
loss = nn.BCELoss()
x = torch.randn(3, requires_grad=True)
target = torch.empty(3).random_(2)  # random 0/1 labels
output = loss(m(x), target)  # BCELoss input must lie in (0, 1)
output.backward()
print(x)
print(m(x))
print(target)
print(output)
print(x.grad)
x.grad = None  # clear the gradient before the next demo
# 2. BCEWithLogitsLoss: Sigmoid + BCELoss fused into one numerically
#    stable op, so it takes raw logits directly.
m = nn.Sigmoid()  # only used to display the probabilities below
loss = nn.BCEWithLogitsLoss()
x = torch.randn(3, requires_grad=True)
target = torch.empty(3).random_(2)
output = loss(x, target)  # note: raw x, no explicit sigmoid needed
output.backward()  # fix: backward() was missing, so x.grad printed None
print(x)
print(m(x))
print(target)
print(output)
print(x.grad)
x.grad = None
# 3. NLLLoss: negative log-likelihood over log-probabilities.
m = nn.LogSoftmax(dim=1)  # converts raw scores to log-probabilities
loss = nn.NLLLoss()
# input is of size N x C = 3 x 5
x = torch.randn(3, 5, requires_grad=True)
# each element in target has to have 0 <= value < C
target = torch.tensor([1, 0, 4])
output = loss(m(x), target)
# NLLLoss averages -log p(target); so output * 3 equals minus the sum
# of the three selected log-probabilities printed below.
print(output * 3)
print(m(x)[0, 1] + m(x)[1, 0] + m(x)[2, 4])
# 4. CrossEntropyLoss: LogSoftmax + NLLLoss combined; takes raw scores.
loss = nn.CrossEntropyLoss()
# input is of size N x C = 3 x 5
x = torch.randn(3, 5, requires_grad=True)
# each element in target has to have 0 <= value < C
target = torch.empty(3, dtype=torch.long).random_(5)
output = loss(x, target)
print(output)
# 5. L1Loss: mean absolute error.
loss = nn.L1Loss()
x = torch.randn(1, 2, requires_grad=True)  # e.g. tensor([[-0.0625, -2.1603]])
target = torch.randn(1, 2)                 # e.g. tensor([[0.6789, 0.9831]])
output = loss(x, target)  # mean(|x - target|)
print(output)
# 6. MSELoss (L2): mean squared error.
loss = nn.MSELoss()
x = torch.randn(1, 2, requires_grad=True)
target = torch.randn(1, 2)
output = loss(x, target)  # mean((x - target)^2)
print(output)

# SmoothL1Loss (Huber-like): quadratic for small errors, linear for large,
# which makes it less sensitive to outliers than MSE.
loss = nn.SmoothL1Loss()
x = torch.randn(1, 2, requires_grad=True)
target = torch.randn(1, 2)
output = loss(x, target)
print(output)
# References:
# https://blog.csdn.net/q511951451/article/details/102702442
# https://www.jb51.net/article/177665.htm