
Commit 6ecdf76

Merge pull request #4 from jangsoopark/v2.0.0
Noise simulation
2 parents 783fc80 + 6ef89d8


2 files changed: +122 -5 lines changed


README.md (+16 -1)
@@ -146,7 +146,7 @@ $ python3 train.py
 ```
 
 #### Results of SOC
-- Final Accuracy is **99.18%**
+- Final Accuracy is **99.18%** (The official accuracy is 99.13%)
 - You can see the details in `notebook/experiments-SOC.ipynb`
 
 - Visualization of training loss and test accuracy
@@ -157,6 +157,15 @@ $ python3 train.py
 
 ![soc-confusion-matrix](./assets/figure/soc-confusion-matrix.png)
 
+- Noise Simulation [1]
+  - i.i.d samples from a uniform distribution
+
+| Noise | 1% | 5% | 10% | 15% |
+| :---: | :---: | :---: | :---: | :---: |
+| AConvNet-PyTorch | 98.56 | 94.39 | 85.03 | 73.65 |
+| AConvNet-Official | 91.76 | 88.52 | 75.84 | 54.68 |
+
+
 ### Extended Operating Conditions (EOC)
 
 ### Outlier Rejection
@@ -189,6 +198,12 @@ $ python3 train.py
 }
 ```
 
+## References
+[1] G. Dong, N. Wang, and G. Kuang,
+"Sparse representation of monogenic signal: With application to target recognition in SAR images,"
+*IEEE Signal Process. Lett.*, vol. 21, no. 8, pp. 952-956, Aug. 2014.
+
+
 ---
 
 ## TODO
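
The noise simulation added to the README above injects i.i.d. samples from a uniform distribution at a randomly chosen fraction of pixel positions before running inference. The sketch below mirrors the `generate_noise` cell added in `notebook/experiments-SOC.ipynb` (next file); the helper name `add_uniform_noise`, the batch shape, and the 88x88 chip size are illustrative assumptions, not part of the commit.

```python
import numpy as np
from skimage import util


def add_uniform_noise(images, amount):
    """Add i.i.d. uniform noise at a random fraction of pixel positions.

    images: float32 array of shape (N, C, H, W)
    amount: fraction of pixel positions selected by the salt-and-pepper mask
    """
    n, c, h, w = images.shape
    # i.i.d. samples from U[0, 1) for every pixel position
    noise = np.random.uniform(size=(n, c, h, w))
    # Salt pixels of an all-zero image become 1 and the rest stay 0,
    # so the result acts as a binary mask of positions that receive noise.
    mask = util.random_noise(np.zeros((n, c, h, w)), mode='s&p', amount=amount)
    return (images + noise * mask).astype(np.float32)


# Example: corrupt a small batch of 88x88 chips at the 15% setting from the table above.
batch = np.random.rand(4, 1, 88, 88).astype(np.float32)
noisy = add_uniform_noise(batch, amount=0.15)
```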

notebook/experiments-SOC.ipynb (+106 -4)
@@ -138,14 +138,16 @@
 },
 {
 "cell_type": "code",
-"execution_count": 8,
+"execution_count": 6,
 "metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
-"load test data set: 2425it [00:01, 2307.14it/s]\n"
+"load test data set: 2425it [00:01, 2086.92it/s]\n",
+"d:\\ivs\\project\\004-research\\signal-processing\\image-processing\\remote-sensing\\aconvnet\\aconvnet-pytorch\\venv\\lib\\site-packages\\torch\\nn\\functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at ..\\c10/core/TensorImpl.h:1156.)\n",
+" return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n"
 ]
 },
 {
@@ -212,7 +214,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 39,
+"execution_count": 7,
 "metadata": {},
 "outputs": [],
 "source": [
@@ -244,7 +246,7 @@
 },
 {
 "cell_type": "code",
-"execution_count": 48,
+"execution_count": 8,
 "metadata": {},
 "outputs": [
 {
@@ -277,6 +279,106 @@
 "plt.show()"
 ]
 },
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"### Noise Simulation"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 15,
+"metadata": {},
+"outputs": [],
+"source": [
+"from skimage import util"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 77,
+"metadata": {},
+"outputs": [
+{
+"data": {
+"text/plain": [
+"(614.0, 7744)"
+]
+},
+"execution_count": 77,
+"metadata": {},
+"output_type": "execute_result"
+}
+],
+"source": [
+"util.random_noise(np.zeros((88, 88)), mode='s&p', amount=0.15).sum(), 88 * 88"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 99,
+"metadata": {},
+"outputs": [],
+"source": [
+"def generate_noise(_images, amount):\n",
+"    \n",
+"    n, _, h, w = _images.shape\n",
+"    \n",
+"    noise = np.random.uniform(size=(n, 1, h, w))\n",
+"    portions = util.random_noise(np.zeros((n, 1, 88, 88)), mode='s&p', amount=amount)\n",
+"    noise = noise * portions\n",
+"    \n",
+"    return _images + noise.astype(np.float32)\n",
+"\n",
+"\n",
+"def noise_simulation(_m, ds, noise_ratio):\n",
+"    \n",
+"    num_data = 0\n",
+"    corrects = 0\n",
+"    \n",
+"    _m.net.eval()\n",
+"    _softmax = torch.nn.Softmax(dim=1)\n",
+"    for i, data in enumerate(ds):\n",
+"        images, labels = data\n",
+"        images = generate_noise(images, noise_ratio)\n",
+"\n",
+"        predictions = _m.inference(images)\n",
+"        predictions = _softmax(predictions)\n",
+"\n",
+"        _, predictions = torch.max(predictions.data, 1)\n",
+"        labels = labels.type(torch.LongTensor)\n",
+"        num_data += labels.size(0)\n",
+"        corrects += (predictions == labels.to(m.device)).sum().item()\n",
+"\n",
+"    accuracy = 100 * corrects / num_data\n",
+"    return accuracy"
+]
+},
+{
+"cell_type": "code",
+"execution_count": 103,
+"metadata": {},
+"outputs": [
+{
+"name": "stdout",
+"output_type": "stream",
+"text": [
+"ratio = 0.01, accuracy = 98.56\n",
+"ratio = 0.05, accuracy = 94.39\n",
+"ratio = 0.10, accuracy = 85.03\n",
+"ratio = 0.15, accuracy = 73.65\n"
+]
+}
+],
+"source": [
+"noise_result = {}\n",
+"\n",
+"for ratio in [0.01, 0.05, 0.10, 0.15]:\n",
+"    noise_result[ratio] = noise_simulation(m, test_set, ratio)\n",
+"    print(f'ratio = {ratio:.2f}, accuracy = {noise_result[ratio]:.2f}')\n"
+]
+},
 {
 "cell_type": "code",
 "execution_count": null,
