This article shows how to extract feature vectors from a model with PyTorch and save them to a CSV file. The approach is quite practical, so it is shared here as a reference; follow along with the code below.
Extracting feature vectors with PyTorch
# -*- coding: utf-8 -*-
"""
Extract image feature vectors with a pretrained backbone and append them to a CSV file.
"""
import os

import pandas as pd
import pretrainedmodels
import torch
import torch.nn as nn
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms


class FCViewer(nn.Module):
    """Flatten a (N, C, 1, 1) feature map into (N, C)."""
    def forward(self, x):
        return x.view(x.size(0), -1)


class M(nn.Module):
    def __init__(self, backbone1, drop, pretrained=True):
        super(M, self).__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')
        else:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained=None)
        # Drop the last two children (the pooling layer and the fully connected
        # classifier), then add adaptive average pooling so the encoder outputs (N, C, 1, 1).
        self.img_encoder = list(img_model.children())[:-2]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        # Both branches currently only flatten; `drop` is kept for the original signature.
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer())
        else:
            self.img_fc = nn.Sequential(FCViewer())

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img


model1 = M('resnet18', 0, pretrained=True)

features_dir = '/home/cc/Desktop/features'

# Standard ImageNet-style preprocessing: resize, center-crop to 224, convert to tensor.
transform1 = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor()])

file_path = '/home/cc/Desktop/picture'
names = os.listdir(file_path)
print(names)

for name in names:
    pic = file_path + '/' + name
    img = Image.open(pic)
    img1 = transform1(img)
    # Add a batch dimension; gradients are not needed for feature extraction.
    x = Variable(torch.unsqueeze(img1, dim=0).float(), requires_grad=False)
    y = model1(x)
    y = y.data.numpy().tolist()
    # Append one row of features per image to the CSV file.
    test = pd.DataFrame(data=y)
    test.to_csv("/home/cc/Desktop/features/3.csv", mode='a+', index=None, header=None)
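Each processed image is appended as one row to 3.csv, so a quick sanity check is to read the file back with pandas and confirm its shape. The snippet below is a minimal sketch that reuses `model1` and `img1` from the script above; it also shows the `model.eval()` plus `torch.no_grad()` pattern, which replaces the deprecated `Variable` wrapper on PyTorch 0.4 and later.

import pandas as pd
import torch

# Sanity check: every image should appear as one 512-dimensional row
# (resnet18's final feature width) in the accumulated CSV file.
feats = pd.read_csv("/home/cc/Desktop/features/3.csv", header=None)
print(feats.shape)  # (number_of_images, 512)

# On PyTorch 0.4+ the Variable wrapper is unnecessary; eval() plus no_grad()
# disables batch-norm/dropout updates and gradient tracking during extraction.
model1.eval()
with torch.no_grad():
    y = model1(torch.unsqueeze(img1, dim=0).float())
print(y.shape)  # torch.Size([1, 512])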
Loading a trained model and extracting features from it
import torch
import torch.nn as nn
import torch.nn.functional as F


class ResidualBlock(nn.Module):
    def __init__(self, inchannel, outchannel, stride=1):
        super(ResidualBlock, self).__init__()
        self.left = nn.Sequential(
            nn.Conv2d(inchannel, outchannel, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(outchannel),
            nn.ReLU(inplace=True),
            nn.Conv2d(outchannel, outchannel, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(outchannel)
        )
        self.shortcut = nn.Sequential()
        if stride != 1 or inchannel != outchannel:
            # 1x1 convolution so the shortcut matches the output shape.
            self.shortcut = nn.Sequential(
                nn.Conv2d(inchannel, outchannel, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(outchannel)
            )

    def forward(self, x):
        out = self.left(x)
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class ResNet(nn.Module):
    def __init__(self, ResidualBlock, num_classes=10):
        super(ResNet, self).__init__()
        self.inchannel = 64
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(),
        )
        self.layer1 = self.make_layer(ResidualBlock, 64, 2, stride=1)
        self.layer2 = self.make_layer(ResidualBlock, 128, 2, stride=2)
        self.layer3 = self.make_layer(ResidualBlock, 256, 2, stride=2)
        self.layer4 = self.make_layer(ResidualBlock, 512, 2, stride=2)
        self.fc = nn.Linear(512, num_classes)

    def make_layer(self, block, channels, num_blocks, stride):
        strides = [stride] + [1] * (num_blocks - 1)  # e.g. strides = [1, 1]
        layers = []
        for stride in strides:
            layers.append(block(self.inchannel, channels, stride))
            self.inchannel = channels
        return nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv1(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.fc(out)
        return out


def ResNet18():
    return ResNet(ResidualBlock)


import os

import pandas as pd
import pretrainedmodels
from PIL import Image
from torch.autograd import Variable
from torchvision import transforms


class FCViewer(nn.Module):
    def forward(self, x):
        return x.view(x.size(0), -1)


class M(nn.Module):
    def __init__(self, backbone1, drop, pretrained=True):
        super(M, self).__init__()
        if pretrained:
            img_model = pretrainedmodels.__dict__[backbone1](num_classes=1000, pretrained='imagenet')
        else:
            # Use the locally defined ResNet18 and load the trained weights.
            img_model = ResNet18()
            we = '/home/cc/Desktop/dj/model1/incption--7'
            # net = ResNet18().to(device)
            img_model.load_state_dict(torch.load(we))  # load the saved state_dict
        # Note: for this custom ResNet, [:-2] removes both `fc` and `layer4`,
        # so the features come from the output of layer3.
        self.img_encoder = list(img_model.children())[:-2]
        self.img_encoder.append(nn.AdaptiveAvgPool2d(1))
        self.img_encoder = nn.Sequential(*self.img_encoder)
        if drop > 0:
            self.img_fc = nn.Sequential(FCViewer())
        else:
            self.img_fc = nn.Sequential(FCViewer())

    def forward(self, x_img):
        x_img = self.img_encoder(x_img)
        x_img = self.img_fc(x_img)
        return x_img


model1 = M('resnet18', 0, pretrained=None)

features_dir = '/home/cc/Desktop/features'

# The custom ResNet above expects 32x32 inputs, so resize and center-crop to 32.
transform1 = transforms.Compose([
    transforms.Resize(56),
    transforms.CenterCrop(32),
    transforms.ToTensor()])

file_path = '/home/cc/Desktop/picture'
names = os.listdir(file_path)
print(names)

for name in names:
    pic = file_path + '/' + name
    img = Image.open(pic)
    img1 = transform1(img)
    x = Variable(torch.unsqueeze(img1, dim=0).float(), requires_grad=False)
    y = model1(x)
    y = y.data.numpy().tolist()
    test = pd.DataFrame(data=y)
    test.to_csv("/home/cc/Desktop/features/3.csv", mode='a+', index=None, header=None)
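The load_state_dict() call above assumes the checkpoint at /home/cc/Desktop/dj/model1/incption--7 was saved as a plain state_dict. The training script is not part of this article, so the sketch below only illustrates, under that assumption, how such a checkpoint is typically written during training and read back later; the training loop itself is a placeholder.

import torch

# Hypothetical sketch of how the checkpoint loaded above could have been produced;
# the optimizer, epochs, and path here are placeholders, not the author's actual code.
net = ResNet18()
# ... training loop over 32x32 images would go here ...
torch.save(net.state_dict(), '/home/cc/Desktop/dj/model1/incption--7')

# Later, load_state_dict() expects exactly this kind of state_dict file:
net2 = ResNet18()
net2.load_state_dict(torch.load('/home/cc/Desktop/dj/model1/incption--7'))
net2.eval()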
Thanks for reading! That is all for "How to extract model feature vectors with PyTorch and save them to CSV". Hopefully the content above is helpful; if you found the article useful, feel free to share it with others.