Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
deeplearningzerotoall
GitHub Repository: deeplearningzerotoall/PyTorch
Path: blob/master/CNN/lab-09-3-xor-nn-wide-deep.ipynb
631 views
Kernel: Python 3
# Lab 9 XOR — device selection and reproducibility setup.
import torch

# Prefer the GPU when one is available; otherwise run on the CPU.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Seed both the CPU and (if used) every CUDA RNG so runs are reproducible.
torch.manual_seed(777)
if device == 'cuda':
    torch.cuda.manual_seed_all(777)
# XOR truth table: four 2-bit inputs X and their XOR labels Y,
# moved to the selected device.
X = torch.FloatTensor([[0, 0], [0, 1], [1, 0], [1, 1]]).to(device)
Y = torch.FloatTensor([[0], [1], [1], [0]]).to(device)
# nn layers: a "wide and deep" stack, 2 -> 10 -> 10 -> 10 -> 1,
# with a sigmoid after every linear layer.
linear1 = torch.nn.Linear(2, 10, bias=True)
linear2 = torch.nn.Linear(10, 10, bias=True)
linear3 = torch.nn.Linear(10, 10, bias=True)
linear4 = torch.nn.Linear(10, 1, bias=True)
sigmoid = torch.nn.Sigmoid()

# Chain the layers into one sequential model on the selected device.
model = torch.nn.Sequential(
    linear1, sigmoid,
    linear2, sigmoid,
    linear3, sigmoid,
    linear4, sigmoid,
).to(device)
# define cost/loss & optimizer: binary cross-entropy (outputs are already
# squashed by the final sigmoid) trained with plain SGD.
criterion = torch.nn.BCELoss().to(device)
# modified learning rate from 0.1 to 1
optimizer = torch.optim.SGD(model.parameters(), lr=1)
# Train for 10,001 steps, logging the loss once every 100 steps.
# NOTE: `hypothesis` is intentionally left in scope — the evaluation
# cell below reads it after training finishes.
for step in range(10001):
    optimizer.zero_grad()
    hypothesis = model(X)
    # cost/loss function
    cost = criterion(hypothesis, Y)
    cost.backward()
    optimizer.step()
    if step % 100 == 0:
        print(step, cost.item())
0 0.6948983669281006 100 0.6931558847427368 200 0.6931535005569458 300 0.6931513547897339 400 0.6931493282318115 500 0.6931473016738892 600 0.6931453943252563 700 0.6931434869766235 800 0.6931416988372803 900 0.6931397914886475 1000 0.6931380033493042 1100 0.6931362152099609 1200 0.6931343078613281 1300 0.6931324005126953 1400 0.6931304931640625 1500 0.6931284666061401 1600 0.6931264400482178 1700 0.6931242942810059 1800 0.6931220293045044 1900 0.6931196451187134 2000 0.6931171417236328 2100 0.6931145191192627 2200 0.6931115984916687 2300 0.6931085586547852 2400 0.693105161190033 2500 0.6931014657020569 2600 0.6930974721908569 2700 0.6930930018424988 2800 0.6930880546569824 2900 0.6930825710296631 3000 0.6930763125419617 3100 0.6930692791938782 3200 0.6930612325668335 3300 0.6930519342422485 3400 0.693041205406189 3500 0.693028450012207 3600 0.6930133104324341 3700 0.6929951906204224 3800 0.6929729580879211 3900 0.6929453015327454 4000 0.6929103136062622 4100 0.6928650140762329 4200 0.6928046941757202 4300 0.6927220225334167 4400 0.692604124546051 4500 0.6924278736114502 4600 0.692147970199585 4700 0.6916665434837341 4800 0.6907395720481873 4900 0.6886204481124878 5000 0.6820821762084961 5100 0.6472558379173279 5200 0.4495784044265747 5300 0.041401054710149765 5400 0.00973653607070446 5500 0.0050338273867964745 5600 0.00329551356844604 5700 0.0024154414422810078 5800 0.0018910930957645178 5900 0.0015457704430446029 6000 0.0013024783693253994 6100 0.001122395507991314 6200 0.0009841559221968055 6300 0.0008749148109927773 6400 0.0007865495281293988 6500 0.0007136262720450759 6600 0.0006525927456095815 6700 0.000600747880525887 6800 0.0005561667494475842 6900 0.0005174618563614786 7000 0.0004836336011067033 7100 0.0004537721397355199 7200 0.0004272061923984438 7300 0.00040348825859837234 7400 0.00038214115193113685 7500 0.00036286652903072536 7600 0.00034532143035903573 7700 0.00032935672788880765 7800 0.000314718927256763 7900 0.00030131853418424726 8000 
0.0002889616880565882 8100 0.0002774993481580168 8200 0.0002669314562808722 8300 0.0002570493088569492 8400 0.00024786783615127206 8500 0.00023931238683871925 8600 0.00023129362671170384 8700 0.0002237667649751529 8800 0.00021670199930667877 8900 0.00021005462622269988 9000 0.000203779898583889 9100 0.0001978629152290523 9200 0.00019222912669647485 9300 0.00018693818128667772 9400 0.00018191552953794599 9500 0.00017716118600219488 9600 0.00017261551693081856 9700 0.00016829342348501086 9800 0.00016415018762927502 9900 0.00016021561168599874 10000 0.0001565046259202063
# Accuracy computation
# True if hypothesis>0.5 else False
with torch.no_grad():
    # Recompute the forward pass here rather than relying on the stale
    # `hypothesis` variable left over from the last training iteration;
    # under no_grad the result carries no autograd graph, so the
    # previously needed .detach() calls are dropped.
    hypothesis = model(X)
    predicted = (hypothesis > 0.5).float()
    accuracy = (predicted == Y).float().mean()
    print('\nHypothesis: ', hypothesis.cpu().numpy(),
          '\nCorrect: ', predicted.cpu().numpy(),
          '\nAccuracy: ', accuracy.item())
Hypothesis: [[1.1170952e-04] [9.9982882e-01] [9.9984229e-01] [1.8537771e-04]] Correct: [[0.] [1.] [1.] [0.]] Accuracy: 1.0