|
138 | 138 | },
|
139 | 139 | {
|
140 | 140 | "cell_type": "code",
|
141 |
| - "execution_count": 8, |
| 141 | + "execution_count": 6, |
142 | 142 | "metadata": {},
|
143 | 143 | "outputs": [
|
144 | 144 | {
|
145 | 145 | "name": "stderr",
|
146 | 146 | "output_type": "stream",
|
147 | 147 | "text": [
|
148 |
| - "load test data set: 2425it [00:01, 2307.14it/s]\n" |
| 148 | + "load test data set: 2425it [00:01, 2086.92it/s]\n", |
| 149 | + "d:\\ivs\\project\\004-research\\signal-processing\\image-processing\\remote-sensing\\aconvnet\\aconvnet-pytorch\\venv\\lib\\site-packages\\torch\\nn\\functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at ..\\c10/core/TensorImpl.h:1156.)\n", |
| 150 | + " return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n" |
149 | 151 | ]
|
150 | 152 | },
|
151 | 153 | {
|
|
212 | 214 | },
|
213 | 215 | {
|
214 | 216 | "cell_type": "code",
|
215 |
| - "execution_count": 39, |
| 217 | + "execution_count": 7, |
216 | 218 | "metadata": {},
|
217 | 219 | "outputs": [],
|
218 | 220 | "source": [
|
|
244 | 246 | },
|
245 | 247 | {
|
246 | 248 | "cell_type": "code",
|
247 |
| - "execution_count": 48, |
| 249 | + "execution_count": 8, |
248 | 250 | "metadata": {},
|
249 | 251 | "outputs": [
|
250 | 252 | {
|
|
277 | 279 | "plt.show()"
|
278 | 280 | ]
|
279 | 281 | },
|
| 282 | + { |
| 283 | + "cell_type": "markdown", |
| 284 | + "metadata": {}, |
| 285 | + "source": [ |
| 286 | + "### Noise Simulation" |
| 287 | + ] |
| 288 | + }, |
| 289 | + { |
| 290 | + "cell_type": "code", |
| 291 | + "execution_count": 15, |
| 292 | + "metadata": {}, |
| 293 | + "outputs": [], |
| 294 | + "source": [ |
| 295 | + "from skimage import util" |
| 296 | + ] |
| 297 | + }, |
| 298 | + { |
| 299 | + "cell_type": "code", |
| 300 | + "execution_count": 77, |
| 301 | + "metadata": {}, |
| 302 | + "outputs": [ |
| 303 | + { |
| 304 | + "data": { |
| 305 | + "text/plain": [ |
| 306 | + "(614.0, 7744)" |
| 307 | + ] |
| 308 | + }, |
| 309 | + "execution_count": 77, |
| 310 | + "metadata": {}, |
| 311 | + "output_type": "execute_result" |
| 312 | + } |
| 313 | + ], |
| 314 | + "source": [ |
| 315 | + "util.random_noise(np.zeros((88, 88)), mode='s&p', amount=0.15).sum(), 88 * 88" |
| 316 | + ] |
| 317 | + }, |
| 318 | + { |
| 319 | + "cell_type": "code", |
| 320 | + "execution_count": 99, |
| 321 | + "metadata": {}, |
| 322 | + "outputs": [], |
| 323 | + "source": [ |
| 324 | + "def generate_noise(_images, amount):\n", |
| 325 | + " \n", |
| 326 | + " n, _, h, w = _images.shape\n", |
| 327 | + " \n", |
| 328 | + " noise = np.random.uniform(size=(n, 1, h, w))\n", |
| 329 | + " portions = util.random_noise(np.zeros((n, 1, h, w)), mode='s&p', amount=amount)\n", |
| 330 | + " noise = noise * portions\n", |
| 331 | + " \n", |
| 332 | + " return _images + noise.astype(np.float32)\n", |
| 333 | + "\n", |
| 334 | + "\n", |
| 335 | + "def noise_simulation(_m, ds, noise_ratio):\n", |
| 336 | + " \n", |
| 337 | + " num_data = 0\n", |
| 338 | + " corrects = 0\n", |
| 339 | + " \n", |
| 340 | + " _m.net.eval()\n", |
| 341 | + " _softmax = torch.nn.Softmax(dim=1)\n", |
| 342 | + " for i, data in enumerate(ds):\n", |
| 343 | + " images, labels = data\n", |
| 344 | + " images = generate_noise(images, noise_ratio)\n", |
| 345 | + "\n", |
| 346 | + " predictions = _m.inference(images)\n", |
| 347 | + " predictions = _softmax(predictions)\n", |
| 348 | + "\n", |
| 349 | + " _, predictions = torch.max(predictions.data, 1)\n", |
| 350 | + " labels = labels.type(torch.LongTensor)\n", |
| 351 | + " num_data += labels.size(0)\n", |
| 352 | + " corrects += (predictions == labels.to(_m.device)).sum().item()\n", |
| 353 | + "\n", |
| 354 | + " accuracy = 100 * corrects / num_data\n", |
| 355 | + " return accuracy" |
| 356 | + ] |
| 357 | + }, |
| 358 | + { |
| 359 | + "cell_type": "code", |
| 360 | + "execution_count": 103, |
| 361 | + "metadata": {}, |
| 362 | + "outputs": [ |
| 363 | + { |
| 364 | + "name": "stdout", |
| 365 | + "output_type": "stream", |
| 366 | + "text": [ |
| 367 | + "ratio = 0.01, accuracy = 98.56\n", |
| 368 | + "ratio = 0.05, accuracy = 94.39\n", |
| 369 | + "ratio = 0.10, accuracy = 85.03\n", |
| 370 | + "ratio = 0.15, accuracy = 73.65\n" |
| 371 | + ] |
| 372 | + } |
| 373 | + ], |
| 374 | + "source": [ |
| 375 | + "noise_result = {}\n", |
| 376 | + "\n", |
| 377 | + "for ratio in [0.01, 0.05, 0.10, 0.15]:\n", |
| 378 | + " noise_result[ratio] = noise_simulation(m, test_set, ratio)\n", |
| 379 | + " print(f'ratio = {ratio:.2f}, accuracy = {noise_result[ratio]:.2f}')\n" |
| 380 | + ] |
| 381 | + }, |
280 | 382 | {
|
281 | 383 | "cell_type": "code",
|
282 | 384 | "execution_count": null,
|
|
0 commit comments