Describe the Bug
paddle.jit.save fails with: ValueError: Function: forward doesn't exist in the Module transformed from AST.
Model definition:
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F

# PatchEmbed, PatchUnEmbed, RSTB, Upsample and UpsampleOneStep are defined
# elsewhere in swinir.py and are omitted here.

class SwinIR(nn.Layer):
    def __init__(self, img_size=64, patch_size=1, in_chans=3,
                 embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6],
                 window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
                 norm_layer=nn.LayerNorm, ape=False, patch_norm=True,
                 upscale=2, img_range=1., upsampler='', resi_connection='1conv'):
        super(SwinIR, self).__init__()
        num_in_ch = in_chans
        num_out_ch = in_chans
        num_feat = 64
        self.img_range = img_range
        if in_chans == 3:
            rgb_mean = np.array([0.4488, 0.4371, 0.4040], dtype=np.float32)
            # paddle.to_tensor is the supported way to build a tensor from numpy
            self.mean = paddle.to_tensor(rgb_mean).reshape([1, 3, 1, 1])
        else:
            # shape must be a list of ints, not floats
            self.mean = paddle.zeros([1, 1, 1, 1], dtype=paddle.float32)
        self.upscale = upscale
        self.upsampler = upsampler
        self.window_size = window_size
        # shallow feature extraction
        self.conv_first = nn.Conv2D(num_in_ch, embed_dim, 3, 1, 1)
        # deep feature extraction
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.num_features = embed_dim
        self.mlp_ratio = mlp_ratio
        self.patch_embed = PatchEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        num_patches = self.patch_embed.num_patches
        patches_resolution = self.patch_embed.patches_resolution
        self.patches_resolution = patches_resolution
        self.patch_unembed = PatchUnEmbed(
            img_size=img_size, patch_size=patch_size, in_chans=embed_dim, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None)
        if self.ape:
            self.absolute_pos_embed = paddle.nn.ParameterList([paddle.create_parameter(
                shape=[1, num_patches, embed_dim], dtype='float32',
                default_initializer=paddle.nn.initializer.TruncatedNormal(std=.02))])
        self.pos_drop = nn.Dropout(p=drop_rate)
        # stochastic depth decay rule
        dpr = [x.item() for x in paddle.linspace(0, drop_path_rate, sum(depths))]
        self.layers = nn.LayerList()
        for i_layer in range(self.num_layers):
            layer = RSTB(dim=embed_dim,
                         input_resolution=(patches_resolution[0],
                                           patches_resolution[1]),
                         depth=depths[i_layer],
                         num_heads=num_heads[i_layer],
                         window_size=window_size,
                         mlp_ratio=self.mlp_ratio,
                         qkv_bias=qkv_bias, qk_scale=qk_scale,
                         drop=drop_rate, attn_drop=attn_drop_rate,
                         drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],  # no impact on SR results
                         downsample=None,
                         img_size=img_size,
                         patch_size=patch_size,
                         resi_connection=resi_connection)
            self.layers.append(layer)
        self.norm = norm_layer(self.num_features)
        if resi_connection == '1conv':
            self.conv_after_body = nn.Conv2D(embed_dim, embed_dim, 3, 1, 1)
        elif resi_connection == '3conv':
            self.conv_after_body = nn.Sequential(
                nn.Conv2D(embed_dim, embed_dim // 4, 3, 1, 1),
                nn.LeakyReLU(negative_slope=0.2),
                nn.Conv2D(embed_dim // 4, embed_dim // 4, 1, 1, 0),
                nn.LeakyReLU(negative_slope=0.2),
                nn.Conv2D(embed_dim // 4, embed_dim, 3, 1, 1))
        # reconstruction
        if self.upsampler == 'pixelshuffle':
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2D(embed_dim, num_feat, 3, 1, 1),
                nn.LeakyReLU())
            self.upsample = Upsample(upscale, num_feat)
            self.conv_last = nn.Conv2D(num_feat, num_out_ch, 3, 1, 1)
        elif self.upsampler == 'pixelshuffledirect':
            self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch,
                                            (patches_resolution[0], patches_resolution[1]))
        elif self.upsampler == 'nearest+conv':
            self.conv_before_upsample = nn.Sequential(
                nn.Conv2D(embed_dim, num_feat, 3, 1, 1),
                nn.LeakyReLU())
            self.conv_up1 = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
            if self.upscale == 4:
                self.conv_up2 = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
            self.conv_hr = nn.Conv2D(num_feat, num_feat, 3, 1, 1)
            self.conv_last = nn.Conv2D(num_feat, num_out_ch, 3, 1, 1)
            self.lrelu = nn.LeakyReLU(negative_slope=0.2)
        else:
            self.conv_last = nn.Conv2D(embed_dim, num_out_ch, 3, 1, 1)

    def no_weight_decay(self):
        return {'absolute_pos_embed'}

    def no_weight_decay_keywords(self):
        return {'relative_position_bias_table'}

    def check_image_size(self, x):
        # pad H and W up to multiples of window_size
        _, _, h, w = x.shape
        mod_pad_h = (self.window_size - h % self.window_size) % self.window_size
        mod_pad_w = (self.window_size - w % self.window_size) % self.window_size
        x = F.pad(x, (0, mod_pad_w, 0, mod_pad_h), 'reflect')
        return x

    def forward_features(self, x):
        x_size = (x.shape[2], x.shape[3])
        x = self.patch_embed(x)
        if self.ape:
            x = x + self.absolute_pos_embed
        x = self.pos_drop(x)
        for layer in self.layers:
            x = layer(x, x_size)
        x = self.norm(x)
        x = self.patch_unembed(x, x_size)
        return x

    def forward(self, x):
        H, W = x.shape[2:]
        x = self.check_image_size(x)
        x = (x - self.mean) * self.img_range
        if self.upsampler == 'pixelshuffle':
            # for classical SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.conv_last(self.upsample(x))
        elif self.upsampler == 'pixelshuffledirect':
            # for lightweight SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.upsample(x)
        elif self.upsampler == 'nearest+conv':
            # for real-world SR
            x = self.conv_first(x)
            x = self.conv_after_body(self.forward_features(x)) + x
            x = self.conv_before_upsample(x)
            x = self.lrelu(self.conv_up1(paddle.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            if self.upscale == 4:
                x = self.lrelu(self.conv_up2(paddle.nn.functional.interpolate(x, scale_factor=2, mode='nearest')))
            x = self.conv_last(self.lrelu(self.conv_hr(x)))
        else:
            # for image denoising and JPEG compression artifact reduction
            x_first = self.conv_first(x)
            res = self.conv_after_body(self.forward_features(x_first)) + x_first
            x = x + self.conv_last(res)
        x = x / self.img_range + self.mean
        return x[:, :, :H * self.upscale, :W * self.upscale]

    def flops(self):
        flops = 0
        H, W = self.patches_resolution
        flops += H * W * 3 * self.embed_dim * 9
        flops += self.patch_embed.flops()
        for i, layer in enumerate(self.layers):
            flops += layer.flops()
        flops += H * W * 3 * self.embed_dim * self.embed_dim
        flops += self.upsample.flops()
        return flops
Code for converting the dynamic model to a static model:
from swinir import *
import paddle

paddle.set_device("cpu")
model = SwinIR(upscale=2, in_chans=3, img_size=64, window_size=8,
               img_range=1., depths=[6, 6, 6, 6], embed_dim=60, num_heads=[6, 6, 6, 6],
               mlp_ratio=2, upsampler='pixelshuffledirect', resi_connection='1conv')
model_dict = paddle.load("/home/aistudio/SwinIR/train_result/2022_10_18_00_36_17/fold_0/model/best/swinir_epoch98_score33.93.pdparams")
model.set_dict(model_dict)
shape = [1, 3, 320, 320]
model = paddle.jit.to_static(model, input_spec=[paddle.static.InputSpec(shape=shape, dtype='float32')])
paddle.jit.save(model, 'exported_models')
Error:
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
/tmp/ipykernel_160/1546829295.py in <module>
11 shape = [1, 3, 320, 320]
12 model = paddle.jit.to_static(model, input_spec=[paddle.static.InputSpec(shape=shape, dtype='float32')])
---> 13 paddle.jit.save(model, 'exported_models')
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/jit.py in wrapper(layer, path, input_spec, **configs)
629 for hook in _save_pre_hooks:
630 hook(layer, input_spec, configs)
--> 631 func(layer, path, input_spec, **configs)
632
633 return wrapper
<decorator-gen-222> in save(layer, path, input_spec, **configs)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/wrapped_decorator.py in __impl__(func, *args, **kwargs)
23 def __impl__(func, *args, **kwargs):
24 wrapped_func = decorator_func(func)
---> 25 return wrapped_func(*args, **kwargs)
26
27 return __impl__
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in __impl__(*args, **kwargs)
49 def __impl__(*args, **kwargs):
50 with framework._dygraph_guard(None):
---> 51 return func(*args, **kwargs)
52
53 return __impl__
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/jit.py in save(layer, path, input_spec, **configs)
859 if isinstance(static_func, StaticFunction):
860 concrete_program = static_func.concrete_program_specify_input_spec(
--> 861 inner_input_spec, with_hook=with_hook)
862 elif 'forward' == attr_func:
863 # transform in jit.save, if input_spec is incomplete, declarative will throw error
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in concrete_program_specify_input_spec(self, input_spec, with_hook)
526 if has_input_spec:
527 concrete_program, _ = self.get_concrete_program(
--> 528 *desired_input_spec, with_hook=with_hook)
529 return concrete_program
530 else:
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in get_concrete_program(self, *args, **kwargs)
434
435 # 3. check whether hit the cache or build a new program for the input arguments
--> 436 concrete_program, partial_program_layer = self._program_cache[cache_key]
437 return concrete_program, partial_program_layer
438
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in __getitem__(self, item)
799 self._recent_key = item_id
800 if item_id not in self._caches:
--> 801 self._caches[item_id] = self._build_once(item)
802 # Note: raise warnings if number of traced program is more than `max_tracing_count`
803 current_tracing_count = len(self._caches)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in _build_once(self, cache_key)
788 input_kwargs_spec=cache_key.input_kwargs_with_spec,
789 class_instance=cache_key.class_instance,
--> 790 **cache_key.kwargs)
791 return concrete_program, partial_program_from(concrete_program)
792
<decorator-gen-220> in from_func_spec(func_spec, input_spec, input_kwargs_spec, class_instance, **kwargs)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/wrapped_decorator.py in __impl__(func, *args, **kwargs)
23 def __impl__(func, *args, **kwargs):
24 wrapped_func = decorator_func(func)
---> 25 return wrapped_func(*args, **kwargs)
26
27 return __impl__
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/base.py in __impl__(*args, **kwargs)
49 def __impl__(*args, **kwargs):
50 with framework._dygraph_guard(None):
---> 51 return func(*args, **kwargs)
52
53 return __impl__
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in from_func_spec(func_spec, input_spec, input_kwargs_spec, class_instance, **kwargs)
692 # Transforms dygraph function into static function and caches it.
693 dygraph_function = func_spec.dygraph_function
--> 694 static_func = convert_to_static(dygraph_function)
695 # apply pre\post hook for outermost layer
696 hook_helper = HookHelper(dygraph_function, class_instance,
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in convert_to_static(function)
138 """
139 with _CACHE_LOCK:
--> 140 static_func = _FUNCTION_CACHE.convert_with_cache(function)
141 return static_func
142
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in convert_with_cache(self, func)
75
76 if static_func is None:
---> 77 static_func = self._convert(func)
78 self._converted_static_func_caches[func] = static_func
79
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/program_translator.py in _convert(self, func)
117
118 # Get static function from AST
--> 119 static_func, file_name = ast_to_func(root_wrapper.node, func)
120
121 create_and_update_origin_info_map(root_wrapper.node, static_func)
/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/dygraph/dygraph_to_static/utils.py in ast_to_func(ast_root, dyfunc, delete_on_exit)
510 raise ValueError(
511 'Function: %s doesn\'t exist in the Module transformed from AST.' %
--> 512 func_name)
513 # After transform dygraph function into callable_func saved in tmp file,
514 # it lost the global variables from imported statements or defined in source file.
ValueError: Function: forward doesn't exist in the Module transformed from AST.
Additional Supplementary Information
No response
6 Answers
#1 ktecyv1j
Hi! We've received your issue; please be patient while we arrange technicians to answer your questions as soon as possible. Please make sure you have provided a clear problem description, reproduction code, environment & version, and error messages. You may also check out the API docs, FAQ, GitHub issues, and the AI community to get an answer. Have a nice day!
#2 nfs0ujit
Hi, please confirm your Paddle version and the corresponding dynamic-to-static API usage. For Paddle 2.0 through 2.4, paddle.jit.save is documented at https://www.paddlepaddle.org.cn/documentation/docs/zh/2.4rc/api/paddle/jit/save_cn.html#save. When paddle.jit.save is used to save a plain function, that function must not hold parameter variables; if you need to save parameters, wrap the function in a Layer and then call the corresponding APIs the same way you would for a Layer.
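To illustrate the pattern this answer describes, here is a minimal sketch of my own (not from the thread; the class name WrappedScale and the toy shapes are hypothetical) of wrapping a parameter-holding computation in a Layer so that paddle.jit.save can serialize it:

import paddle
import paddle.nn as nn

# A bare function that closes over a parameter cannot be saved with
# paddle.jit.save; moving the parameter into a Layer is the supported route.
class WrappedScale(nn.Layer):
    def __init__(self):
        super().__init__()
        # the parameter now belongs to the Layer, not to a free function
        self.scale = self.create_parameter(shape=[1], dtype='float32')

    def forward(self, x):
        return x * self.scale

layer = WrappedScale()
layer = paddle.jit.to_static(
    layer,
    input_spec=[paddle.static.InputSpec(shape=[None, 3], dtype='float32')])
paddle.jit.save(layer, 'wrapped_scale')  # writes wrapped_scale.pdmodel etc.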
#3 knsnq2tg
Replying to #2: does this mean that, apart from def forward, an nn.Layer class may not have any other methods?
#4 wd2eg0qa
Hi, from your code the way you call save looks correct, and a Layer is allowed to have methods other than forward. I also ran an experiment locally: after commenting out the calls inside __init__ and forward (with forward simply returning a linear layer), the export ran through, so I suggest checking the rest of your code. See https://www.paddlepaddle.org.cn/install/quick?docurl=/documentation/docs/zh/install/pip/linux-pip.html if you also want to try the latest Paddle build.
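A sketch of the isolation experiment described above (the stub below is mine, not the responder's exact script; SwinIRStub is a hypothetical name): strip the Layer down to a trivial forward, confirm the export succeeds, then restore the original __init__/forward pieces one by one until paddle.jit.save fails again.

import paddle
import paddle.nn as nn

# Hypothetical minimal stub of SwinIR: keep the class shell and an extra
# method, but have forward return a single linear projection.
class SwinIRStub(nn.Layer):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(3, 3)

    def no_weight_decay(self):  # extra methods besides forward are allowed
        return {'absolute_pos_embed'}

    def forward(self, x):
        return self.linear(x)

model = SwinIRStub()
model = paddle.jit.to_static(
    model,
    input_spec=[paddle.static.InputSpec(shape=[None, 3], dtype='float32')])
paddle.jit.save(model, 'swinir_stub')  # expected to succeed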
#5 0md85ypi
Replying to #4: I have now posted all of my code above; could you take another look for me?
#6 rkue9o1l
Hi, I suggest you first localize the problem by commenting out parts of the code; you can also consult the dynamic-to-static debugging guide at https://www.paddlepaddle.org.cn/documentation/docs/zh/develop/guides/jit/debugging_cn.html. Thanks.
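For reference, the linked debugging guide describes logging switches for the dynamic-to-static transformer. A minimal sketch, assuming the Paddle 2.x APIs paddle.jit.set_verbosity and paddle.jit.set_code_level behave as documented (verify against your installed version):

import paddle

# Assumption: these switches exist in Paddle 2.x as described in the
# dynamic-to-static debugging guide; check your installed version.
paddle.jit.set_verbosity(3)     # print transformation logs
paddle.jit.set_code_level(100)  # print the transformed static code

# Then rerun paddle.jit.to_static / paddle.jit.save to see where the
# AST transformation of forward goes wrong.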