Book a Demo!
CoCalc Logo Icon
Store | Features | Docs | Share | Support | News | About | Policies | Sign Up | Sign In
deeplearningzerotoall
GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-09-2-xor-nn.ipynb
618 views
Kernel: Python 3
# Lab 9 XOR
import torch

# Run on GPU when available, otherwise fall back to CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# for reproducibility
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)

# XOR truth table: all four input pairs and their targets.
X = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]).to(device)
Y = torch.FloatTensor([[0], [1], [1], [0]]).to(device)

# nn layers: a 2-2-1 network. The hidden layer is essential — XOR is not
# linearly separable, so a single linear layer cannot learn it.
linear1 = torch.nn.Linear(2, 2, bias=True)
linear2 = torch.nn.Linear(2, 1, bias=True)
sigmoid = torch.nn.Sigmoid()

# model
model = torch.nn.Sequential(linear1, sigmoid, linear2, sigmoid).to(device)

# define cost/loss & optimizer
# BCELoss expects probabilities in (0, 1), which the final sigmoid provides.
criterion = torch.nn.BCELoss().to(device)
optimizer = torch.optim.SGD(model.parameters(), lr=1)  # modified learning rate from 0.1 to 1

for step in range(10001):
    optimizer.zero_grad()
    hypothesis = model(X)

    # cost/loss function
    cost = criterion(hypothesis, Y)
    cost.backward()
    optimizer.step()

    if step % 100 == 0:
        print(step, cost.item())
0 0.7434073090553284 100 0.6931650638580322 200 0.6931577920913696 300 0.6931517124176025 400 0.6931463479995728 500 0.6931411027908325 600 0.693135678768158 700 0.6931295394897461 800 0.693122148513794 900 0.6931126713752747 1000 0.6930999755859375 1100 0.693082332611084 1200 0.6930568814277649 1300 0.6930190920829773 1400 0.6929606199264526 1500 0.6928659677505493 1600 0.6927032470703125 1700 0.6923960447311401 1800 0.6917301416397095 1900 0.6899654865264893 2000 0.6838318109512329 2100 0.6561676263809204 2200 0.4311096668243408 2300 0.1348954439163208 2400 0.0663050040602684 2500 0.04216844588518143 2600 0.03045402094721794 2700 0.02366602048277855 2800 0.019277796149253845 2900 0.01622406765818596 3000 0.013983823359012604 3100 0.012273991480469704 3200 0.010928178206086159 3300 0.009842487052083015 3400 0.008949032984673977 3500 0.008201336488127708 3600 0.007566767744719982 3700 0.007021686062216759 3800 0.006548595614731312 3900 0.006134253926575184 4000 0.005768374539911747 4100 0.0054430365562438965 4200 0.005151890218257904 4300 0.0048899175599217415 4400 0.004652872681617737 4500 0.004437457304447889 4600 0.004240859299898148 4700 0.00406070239841938 4800 0.0038950315210968256 4900 0.003742194501683116 5000 0.003600734518840909 5100 0.0034694799687713385 5200 0.0033473046496510506 5300 0.0032333978451788425 5400 0.0031268750317394733 5500 0.0030270610004663467 5600 0.002933340147137642 5700 0.0028452035039663315 5800 0.002762140706181526 5900 0.0026837773621082306 6000 0.0026096487417817116 6100 0.0025394847616553307 6200 0.0024729417636990547 6300 0.0024097643326967955 6400 0.0023497282527387142 6500 0.0022925485391169786 6600 0.002238075714558363 6700 0.002186085097491741 6800 0.0021364721469581127 6900 0.002089011948555708 7000 0.0020436146296560764 7100 0.0020001311786472797 7200 0.0019584116525948048 7300 0.0019184107659384608 7400 0.0018799942918121815 7500 0.0018430722411721945 7600 0.0018075400730594993 7700 0.0017733527347445488 7800 
0.0017404207028448582 7900 0.0017087138257920742 8000 0.001678097527474165 8100 0.0016485570231452584 8200 0.001620002556592226 8300 0.0015924491453915834 8400 0.0015657917829230428 8500 0.0015400308184325695 8600 0.0015150615945458412 8700 0.001490913680754602 8800 0.0014674977865070105 8900 0.001444813678972423 9000 0.0014228166546672583 9100 0.0014014765620231628 9200 0.0013806892093271017 9300 0.0013606036081910133 9400 0.0013410557294264436 9500 0.001322030322626233 9600 0.001303557539358735 9700 0.001285637030377984 9800 0.0012681199004873633 9900 0.0012511102249845862 10000 0.0012345188297331333
# Accuracy computation
# True if hypothesis > 0.5 else False
with torch.no_grad():
    # Recompute the forward pass here: the `hypothesis` left over from the
    # training loop was computed *before* the final optimizer.step(), so it
    # is stale by one parameter update. This reports the final model.
    hypothesis = model(X)
    predicted = (hypothesis > 0.5).float()
    accuracy = (predicted == Y).float().mean()
    print('\nHypothesis: ', hypothesis.detach().cpu().numpy(),
          '\nCorrect: ', predicted.detach().cpu().numpy(),
          '\nAccuracy: ', accuracy.item())
Hypothesis: [[0.00106378] [0.9988938 ] [0.9988939 ] [0.00165883]] Correct: [[0.] [1.] [1.] [0.]] Accuracy: 1.0