update v0.3.1
ZW-ZHANG committed Apr 22, 2022
Parent: 44ab9bd · Commit: b892548
Showing 38 changed files with 2,205 additions and 224 deletions.
2 changes: 1 addition & 1 deletion README.md
@@ -11,7 +11,7 @@
% [![Documentation Status](http://mn.cs.tsinghua.edu.cn/autogl/documentation/?badge=latest)](http://mn.cs.tsinghua.edu.cn/autogl/documentation/?badge=latest)-->

## Latest News

- 2022.4.19 Version v0.3.1 released! Chinese tutorials added for the first time!
- 2021.12.31 Version v0.3.0-pre released!
    - AutoGL now supports [__Deep Graph Library (DGL)__](https://www.dgl.ai/) as a backend for the convenience of DGL users. Node classification, link prediction, and graph classification on homogeneous graphs are already supported on the DGL backend. AutoGL is also compatible with PyG 2.0.
    - AutoGL now supports node classification on __heterogeneous graphs__! See the [heterogeneous graph tutorial](http://mn.cs.tsinghua.edu.cn/autogl/documentation/docfile/tutorial/t_hetero_node_clf.html) for details.
4 changes: 2 additions & 2 deletions README_en.md
@@ -4,7 +4,7 @@ An autoML framework & toolkit for machine learning on graphs.

*Actively under development by @THUMNLab*

Feel free to open <a href="https://github.com/THUMNLab/AutoGL/issues">issues</a> or contact us at <a href="mailto:autogl@tsinghua.edu.cn">autogl@tsinghua.edu.cn</a> if you have any comments or suggestions!
Feel free to open <a href="https://www.gitlink.org.cn/THUMNLab/AutoGL/issues">issues</a> or contact us at <a href="mailto:autogl@tsinghua.edu.cn">autogl@tsinghua.edu.cn</a> if you have any comments or suggestions!

<!--
[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black)
@@ -113,7 +113,7 @@ pip install autogl
Run the following command to install this package from the source.

```
git clone https://github.com/THUMNLab/AutoGL.git
git clone https://gitlink.org.cn/THUMNLab/AutoGL.git
cd AutoGL
python setup.py install
```
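As a quick sanity check after either install route, the version string bumped in this commit can be read back (a minimal sketch; assumes the installation completed successfully):

```python
import autogl

# The version string updated in autogl/__init__.py by this commit
print(autogl.__version__)  # expected output: 0.3.1
```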
2 changes: 1 addition & 1 deletion autogl/__init__.py
@@ -16,4 +16,4 @@
train,
)

__version__ = "0.3.0-pre"
__version__ = "0.3.1"
8 changes: 7 additions & 1 deletion autogl/module/feature/__init__.py
@@ -33,6 +33,9 @@
from ._selectors import (
FilterConstant, GBDTFeatureSelector
)
from ._auto_feature import (
IdentityFeature, OnlyConstFeature, AutoFeatureEngineer
)

__all__ = [
"BaseFeatureEngineer",
@@ -61,5 +64,8 @@
"NXGlobalEfficiency",
"NXIsEulerian",
"FilterConstant",
"GBDTFeatureSelector"
"GBDTFeatureSelector",
"IdentityFeature",
"OnlyConstFeature",
"AutoFeatureEngineer"
]
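With these re-exports in place, the three feature engineers can be imported directly from the package namespace. A minimal sketch (names taken from this diff; constructor arguments and the exact fit/transform call sequence may differ from the real API):

```python
from autogl.module.feature import (
    AutoFeatureEngineer,  # automated feature generation and selection
    IdentityFeature,      # leaves node features unchanged
    OnlyConstFeature,     # attaches a constant feature to every node
)

fe = IdentityFeature()
# dataset = fe.fit_transform(dataset)  # typical fit/transform-style usage on a dataset
```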
4 changes: 2 additions & 2 deletions autogl/module/feature/_auto_feature.py
@@ -166,7 +166,7 @@ def __init__(
verbosity: int = 0,
*args, **kwargs
):
super(AutoFeatureEngineer, self).__init__(multi_graph=False)
super(AutoFeatureEngineer, self).__init__()
self._ops = [op_sum, op_mean, op_max, op_min]
self._sim = cosine_similarity
self._fixlen = fix_length
@@ -208,7 +208,7 @@ def _fit(self, homogeneous_static_graph: autogl.data.graph.GeneralStaticGraph):
for u, v in homogeneous_static_graph.edges.connections.t().numpy():
neighbours[u].append(v)
self.__neighbours: _typing.Sequence[np.ndarray] = tuple(
[np.ndarray(v) for v in neighbours]
[np.array(v) for v in neighbours]
)

x: np.ndarray = _original_features.numpy()
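The `np.ndarray(v)` to `np.array(v)` change fixes a subtle bug: `np.ndarray` is the array *type*, and calling it with a list interprets that list as a shape, returning uninitialized memory instead of the neighbour indices. A small illustration:

```python
import numpy as np

v = [3, 1, 4]          # e.g. the neighbour list of one node
np.ndarray(v).shape    # (3, 1, 4): v is treated as a shape, contents are uninitialized
np.array(v)            # array([3, 1, 4]): builds the intended index array
```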
@@ -29,7 +29,11 @@ def transform(self, dataset, inplace: bool = True):
return dataset

def fit_transform(self, dataset, inplace: bool = True):
return self.fit_transform(dataset, inplace)
for fe in self.fe_components:
dataset = fe.fit(dataset)
for fe in self.fe_components:
dataset = fe.transform(dataset)
return dataset

def __init__(self, feature_engineers: _typing.Iterable[_AbstractBaseFeatureEngineer]):
self.__fe_components: _typing.List[_AbstractBaseFeatureEngineer] = []
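For context, the line being replaced called `fit_transform` from inside itself, so any invocation failed immediately; a stripped-down reproduction (hypothetical class, for illustration only):

```python
class Broken:
    def fit_transform(self, dataset, inplace=True):
        # calls itself with the same arguments -> unbounded recursion
        return self.fit_transform(dataset, inplace)

# Broken().fit_transform([])  # raises RecursionError: maximum recursion depth exceeded
```

The replacement fits every wrapped component first and then applies their transforms; the two `fit` hunks below make the complementary change, dropping the `_transform` call so that fitting and transforming become separate passes.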
@@ -40,7 +40,7 @@ def fit(self, dataset):
with torch.no_grad():
for i, data in enumerate(dataset):
dataset[i] = self.__postprocess(
self._postprocess(self._transform(self._fit(self._preprocess(self.__preprocess(data)))))
self._postprocess(self._fit(self._preprocess(self.__preprocess(data))))
)
return dataset

@@ -25,7 +25,7 @@ def fit(self, dataset):
with torch.no_grad():
for i, data in enumerate(dataset):
dataset[i] = self.__postprocess(
self._postprocess(self._transform(self._fit(self._preprocess(self.__preprocess(data)))))
self._postprocess(self._fit(self._preprocess(self.__preprocess(data))))
)
return dataset

8 changes: 2 additions & 6 deletions autogl/module/feature/_selectors/_basic.py
@@ -16,16 +16,12 @@ def __transform_homogeneous_static_graph(
) -> GeneralStaticGraph:
if (
'x' in static_graph.nodes.data and
self._selection not in (Ellipsis, None) and
isinstance(self._selection, torch.Tensor) and
torch.is_tensor(self._selection) and self._selection.dim() == 1
isinstance(self._selection, (torch.Tensor, np.ndarray))
):
static_graph.nodes.data['x'] = static_graph.nodes.data['x'][:, self._selection]
if (
'feat' in static_graph.nodes.data and
self._selection not in (Ellipsis, None) and
isinstance(self._selection, torch.Tensor) and
torch.is_tensor(self._selection) and self._selection.dim() == 1
isinstance(self._selection, (torch.Tensor, np.ndarray))
):
static_graph.nodes.data['feat'] = static_graph.nodes.data['feat'][:, self._selection]
return static_graph
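The relaxed check lets `_selection` be either a `torch.Tensor` or a `np.ndarray`; both work as a column index on the stored feature matrix. A minimal sketch of the selection step (toy shapes, illustrative only):

```python
import numpy as np
import torch

x = torch.randn(5, 8)              # node features: 5 nodes, 8 feature columns
selection = np.array([0, 3, 7])    # columns kept by the selector

x_selected = x[:, selection]       # numpy or tensor indices both select columns
print(x_selected.shape)            # torch.Size([5, 3])
```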
3 changes: 2 additions & 1 deletion autogl/module/feature/_selectors/_gbdt.py
@@ -41,6 +41,7 @@ def _gbdt_generator(
)

num_classes: int = torch.max(data.y).item() + 1
parameters["num_class"] = num_classes
__optimizer_parameters = {
"num_boost_round": 100,
"early_stopping_rounds": 5,
@@ -78,7 +79,7 @@ def _gbdt_generator(
train_x = pd.DataFrame(x, columns=feature_index, index=None)
dtrain = lightgbm.Dataset(train_x, label=label)
clf = lightgbm.train(
train_set=dtrain, params=params,
train_set=dtrain, params=parameters,
**__optimizer_parameters
)

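Two fixes land here: the multiclass objective now receives the `num_class` entry that LightGBM requires, and `lightgbm.train` is passed the `parameters` dict assembled above rather than the `params` name it previously referenced. A self-contained sketch of the pattern the selector relies on (toy data; the real code derives the feature matrix and labels from the graph):

```python
import lightgbm
import numpy as np
import pandas as pd

x = np.random.rand(200, 16)                 # stand-in node features
label = np.random.randint(0, 3, size=200)   # stand-in class labels
num_classes = int(label.max()) + 1

parameters = {
    "objective": "multiclass",
    "num_class": num_classes,               # required by LightGBM for multiclass objectives
    "verbosity": -1,
}
dtrain = lightgbm.Dataset(pd.DataFrame(x), label=label)
clf = lightgbm.train(params=parameters, train_set=dtrain, num_boost_round=100)

importance = clf.feature_importance()       # per-column scores used to rank/select features
```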
Empty file.
1 change: 0 additions & 1 deletion autogl/module/nas/algorithm/enas.py
@@ -82,7 +82,6 @@ def __init__(
device="auto",
):
super().__init__(device)
self.device = device
self.num_epochs = num_epochs
self.log_frequency = log_frequency
self.entropy_weight = entropy_weight
1 change: 0 additions & 1 deletion autogl/module/nas/algorithm/gasso.py
@@ -61,7 +61,6 @@ def __init__(
device="auto",
):
super().__init__(device=device)
self.device = device
self.num_epochs = num_epochs
self.warmup_epochs = warmup_epochs
self.model_lr = model_lr
2 changes: 0 additions & 2 deletions autogl/module/nas/algorithm/rl.py
@@ -265,7 +265,6 @@ def __init__(
disable_progress=False,
):
super().__init__(device)
self.device = device
self.num_epochs = num_epochs
self.log_frequency = log_frequency
self.entropy_weight = entropy_weight
@@ -447,7 +446,6 @@ def __init__(
hardware_metric_limit=None,
):
super().__init__(device)
self.device = device
self.num_epochs = num_epochs
self.log_frequency = log_frequency
self.entropy_weight = entropy_weight
1 change: 0 additions & 1 deletion autogl/module/nas/algorithm/spos.py
@@ -228,7 +228,6 @@ def __init__(
device="cuda",
):
super().__init__(device)
self.device = device
self.model_lr=5e-3
self.model_wd=5e-4
self.n_warmup = n_warmup
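The same one-line deletion appears in `enas.py`, `gasso.py`, `rl.py` (twice), and `spos.py`: the subclasses stop re-assigning `self.device` after calling the base constructor. A plausible reading, shown as a sketch under an assumed base-class behaviour, is that the base NAS algorithm resolves `device="auto"` to a concrete device, and the duplicate assignment would overwrite that resolved value with the raw string:

```python
import torch

class BaseNAS:
    def __init__(self, device="auto"):
        # assumed behaviour: resolve "auto" once, in the base class
        if device == "auto":
            device = "cuda" if torch.cuda.is_available() else "cpu"
        self.device = torch.device(device)

class SomeAlgorithm(BaseNAS):
    def __init__(self, device="auto"):
        super().__init__(device)
        # self.device = device   # removed: would clobber the resolved device with "auto"
```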
3 changes: 3 additions & 0 deletions autogl/module/nas/space/autoattend.py
@@ -57,6 +57,9 @@ def __init__(

):
super().__init__()

from autogl.backend import DependentBackend
assert not DependentBackend.is_dgl(), "Currently AutoAttend only supports the PyG backend"

self.layer_number = layer_number
self.hidden_dim = hidden_dim
self.input_dim = input_dim
Expand Down
1 change: 1 addition & 0 deletions autogl/solver/classifier/node_classifier.py
@@ -313,6 +313,7 @@ def fit(
# check whether the dataset has features.
# currently we only support graph classification with features.

graph_data = get_graph_from_dataset(dataset, 0)
feat = get_graph_node_features(graph_data)
assert feat is not None, (
"Does not support fit on non node-feature dataset!"
4 changes: 2 additions & 2 deletions docs/docfile/tutorial/t_backend.rst
@@ -9,13 +9,13 @@ enable users from both ends to benefit from the automation of graph learning.
To specify a particular backend, declare it via the environment variable
``AUTOGL_BACKEND``. For example:

.. code-block :: shell
.. code-block:: python
AUTOGL_BACKEND=pyg python xxx.py
or

.. code-block :: python
.. code-block:: python
import os
os.environ["AUTOGL_BACKEND"] = "pyg"
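Whichever form is used, the variable should be in the environment before `autogl` is first imported, since the backend is selected at import time. A minimal sketch (the `DependentBackend` check comes from the `autoattend.py` hunk above):

```python
import os
os.environ["AUTOGL_BACKEND"] = "pyg"   # set before the first `import autogl`

import autogl
from autogl.backend import DependentBackend
print(DependentBackend.is_dgl())       # expected: False when the PyG backend is active
```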