GitHub Repository: NVIDIA/cuda-q-academic
Path: blob/main/qaoa-for-max-cut/Example-02-step-1.py
# SPDX-License-Identifier: Apache-2.0 AND CC-BY-NC-4.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Defining functions to generate the Hamiltonian and kernel for a given graph
# Necessary packages
import networkx as nx
from networkx import algorithms
from networkx.algorithms import community
import cudaq
from cudaq import spin
from cudaq.qis import *
import numpy as np
from typing import List, Tuple
from mpi4py import MPI

# Getting information about the platform
cudaq.set_target("nvidia")
target = cudaq.get_target()

# Setting up MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
num_qpus = comm.Get_size()

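# This example assumes one MPI rank per available GPU/QPU; a typical launch
# (the exact command depends on the MPI installation) looks like:
#     mpiexec -np 4 python Example-02-step-1.py
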
#######################################################
# Step 1
#######################################################
# Function to return a dictionary of subgraphs of the input graph
# using the greedy modularity maximization algorithm

def subgraphpartition(G, n):
    """Divide the graph up into at most n subgraphs

    Parameters
    ----------
    G : networkx.Graph
        Graph that we want to subdivide
    n : int
        The maximum number of subgraphs in the partition

    Returns
    -------
    dict of str : networkx.Graph
        Dictionary of networkx graphs with a string as the key
    """
    greedy_partition = community.greedy_modularity_communities(G, weight=None, resolution=1.1, cutoff=1, best_n=n)
    number_of_subgraphs = len(greedy_partition)

    graph_dictionary = {}
    graph_names = []
    for i in range(number_of_subgraphs):
        name = 'G' + str(i)
        graph_names.append(name)

    for i in range(number_of_subgraphs):
        nodelist = sorted(list(greedy_partition[i]))
        graph_dictionary[graph_names[i]] = nx.subgraph(G, nodelist)

    return graph_dictionary

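# Illustrative usage (a minimal sketch; keys follow the 'G0', 'G1', ... naming above):
#     toy = nx.gnm_random_graph(8, 12, seed=1)
#     parts = subgraphpartition(toy, 2)
#     # parts maps 'G0', 'G1', ... to networkx subgraph views of `toy`,
#     # one per community found by greedy modularity maximization.
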
if rank == 0:
    # Defining the example graph
    # Random graph parameters
    n = 30  # number of nodes
    m = 70  # number of edges
    seed = 20160  # seed random number generators for reproducibility

    # Use seed for reproducibility
    sampleGraph2 = nx.gnm_random_graph(n, m, seed=seed)

    # Subdividing the graph
    num_subgraphs_limit = min(12, len(sampleGraph2.nodes()))  # maximum number of subgraphs for the partition
    subgraph_dictionary = subgraphpartition(sampleGraph2, num_subgraphs_limit)

    # Assign the subgraphs to the QPUs
    number_of_subgraphs = len(sorted(subgraph_dictionary))
    number_of_subgraphs_per_qpu = int(np.ceil(number_of_subgraphs / num_qpus))

    keys_on_qpu = {}

    for q in range(num_qpus):
        keys_on_qpu[q] = []
        for k in range(number_of_subgraphs_per_qpu):
            if (k * num_qpus + q < number_of_subgraphs):
                key = sorted(subgraph_dictionary)[k * num_qpus + q]
                keys_on_qpu[q].append(key)
        print(keys_on_qpu[q], '=subgraph problems to be computed on processor', q)

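    # Illustrative assignment (round-robin by index k * num_qpus + q): with 8 subgraphs
    # and 4 ranks, rank 0 gets ['G0', 'G4'], rank 1 gets ['G1', 'G5'], rank 2 gets
    # ['G2', 'G6'], and rank 3 gets ['G3', 'G7'].
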
    # Distribute the subgraph data to the QPUs
    for i in range(num_qpus):
        subgraph_to_qpu = {}
        for k in keys_on_qpu[i]:
            subgraph_to_qpu[k] = subgraph_dictionary[k]
        if i != 0:
            comm.send(subgraph_to_qpu, dest=i, tag=rank)
            print("{} sent by processor {}".format(subgraph_to_qpu, rank))
        else:
            assigned_subgraph_dictionary = subgraph_to_qpu
else:
    # Receive the subgraph data
    assigned_subgraph_dictionary = comm.recv(source=0, tag=0)
    print("Processor {} received {} from processor {}".format(rank, assigned_subgraph_dictionary, 0))
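
# Optional sanity check (a minimal sketch): on rank 0 one can verify that the communities
# cover every node of sampleGraph2 exactly once before distributing them, e.g.
#     if rank == 0:
#         covered = sorted(node for sg in subgraph_dictionary.values() for node in sg.nodes())
#         assert covered == sorted(sampleGraph2.nodes())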